# File: transformers-main/benchmark/benchmark.py
import argparse
import glob
import json
import os.path
import re
import tempfile
from contextlib import contextmanager
from pathlib import Path

from git import Repo
from huggingface_hub import HfApi
from optimum_benchmark import Benchmark
from optimum_benchmark_wrapper import main

PATH_TO_REPO = Path(__file__).parent.parent.resolve()


@contextmanager
def checkout_commit(repo: Repo, commit_id: str):
    # Check out `commit_id`, then restore the original HEAD (branch or detached commit) on exit.
    current_head = repo.head.commit if repo.head.is_detached else repo.head.ref
    try:
        repo.git.checkout(commit_id)
        yield
    finally:
        repo.git.checkout(current_head)


def summarize(run_dir, metrics, expand_metrics=False):
    # Collect the requested metrics from every `benchmark_report.json` found under `run_dir`
    # and write a `summary.json` next to each report.
    reports = glob.glob(os.path.join(run_dir, '**/benchmark_report.json'), recursive=True)
    report_dirs = [str(Path(report).parent) for report in reports]
    summaries = []
    for report_dir in report_dirs:
        commit = re.search('/commit=([^/]+)', report_dir).groups()[0]
        if not os.path.isfile(os.path.join(report_dir, 'benchmark.json')):
            continue
        benchmark = Benchmark.from_json(os.path.join(report_dir, 'benchmark.json'))
        report = benchmark.report

        model = benchmark.config.backend['model']
        # The benchmark name is the last path component once `backend.model=...` is stripped out.
        benchmark_name = re.sub(f'backend.model={model},*', '', report_dir)
        benchmark_name = str(Path(benchmark_name).parts[-1])
        if benchmark_name.startswith('commit='):
            benchmark_name = benchmark.config.name

        metrics_values = {}
        for metric in metrics:
            keys = metric.split('.')
            value = report.to_dict()
            current = metrics_values
            for key in keys:
                if key not in value:
                    continue
                value = value[key]
                if expand_metrics:
                    if isinstance(value, dict):
                        if key not in current:
                            current[key] = {}
                        current = current[key]
                    else:
                        current[key] = value
            if not expand_metrics:
                metrics_values[metric] = value

        print(f'model: {model}')
        print(f'commit: {commit}')
        print(f'config: {benchmark_name}')
        if len(metrics_values) > 0:
            print('metrics:')
            if expand_metrics:
                print(metrics_values)
            else:
                for metric, value in metrics_values.items():
                    print(f' - {metric}: {value}')
        print('-' * 80)

        summary = {'model': model, 'commit': commit, 'config': benchmark_name, 'metrics': metrics_values}
        summaries.append(summary)
        with open(os.path.join(report_dir, 'summary.json'), 'w') as fp:
            json.dump(summary, fp, indent=4)
    return summaries


def combine_summaries(summaries):
    # Merge per-run summaries into a `model -> config -> commit -> metrics` mapping.
    # Note: this relies on the module-level `exp_run_dir` defined in the `__main__` block below.
    combined = {}
    for summary in summaries:
        model = summary['model']
        config = summary['config']
        commit = summary['commit']
        if model not in combined:
            combined[model] = {}
        if config not in combined[model]:
            combined[model][config] = {}
        if commit not in combined[model][config]:
            combined[model][config][commit] = {'metrics': summary['metrics']}
    with open(os.path.join(exp_run_dir, 'summary.json'), 'w') as fp:
        json.dump(combined, fp, indent=4)
    print(json.dumps(combined, indent=4))
    return combined


if __name__ == '__main__':

    def list_str(values):
        return values.split(',')

    parser = argparse.ArgumentParser()
    parser.add_argument('--config-dir', type=str, required=True, help='The path to the config directory.')
    parser.add_argument('--config-name', type=str, required=True, help='The config name.')
    # Caveat: argparse's `type=bool` treats any non-empty string as True.
    parser.add_argument('--ensure_empty', type=bool, default=True, help='Whether to create a temporary directory.')
    parser.add_argument(
        '--commit',
        type=list_str,
        default='',
        help='Comma-separated list of branch names and/or commit sha values on which the benchmark will run. '
        'If `diff` is specified, it will run on both the current head and the `main` branch.',
    )
    parser.add_argument('--metrics', type=str, help='The metrics to be included in the summary.')
    parser.add_argument('--repo_id', type=str, default=None, help='The repository to which the file will be uploaded.')
    parser.add_argument('--path_in_repo', type=str, default=None, help='Relative filepath in the repo.')
    parser.add_argument('--token', type=str, default=None, help='A valid user access token (string).')
    args, optimum_benchmark_args = parser.parse_known_args()

    repo = Repo(PATH_TO_REPO)

    # Default metrics reported in the summary; can be overridden with `--metrics`.
    metrics = [
        'prefill.latency.mean',
        'prefill.throughput.value',
        'decode.latency.mean',
        'decode.throughput.value',
        'per_token.latency.mean',
        'per_token.throughput.value',
    ]
    if args.metrics is not None:
        metrics = args.metrics.split(',')

    # Extract (and remove) `backend.model=...` from the arguments forwarded to optimum-benchmark.
    models = ['']
    for idx, arg in enumerate(optimum_benchmark_args):
        if arg.startswith('backend.model='):
            models = arg[len('backend.model='):]
            models = models.split(',')
            break
    optimum_benchmark_args = [arg for arg in optimum_benchmark_args if not arg.startswith('backend.model=')]

    # Resolve the list of commits on which to run the benchmark.
    current_head = str(repo.head.commit) if repo.head.is_detached else str(repo.head.ref)
    commits = [x for x in args.commit if x != '']
    if len(commits) == 0:
        commits = [current_head]
    elif len(commits) == 1 and commits[0] == 'diff':
        commits = ['main', current_head]

    # Resolve the hydra run/sweep directory override forwarded to optimum-benchmark.
    run_dir_arg_idx, run_dir = -1, None
    sweep_dir_arg_idx, sweep_dir = -1, None
    for idx, arg in enumerate(optimum_benchmark_args):
        if arg.startswith('hydra.run.dir='):
            run_dir = arg[len('hydra.run.dir='):]
            run_dir_arg_idx = idx
        elif arg.startswith('hydra.sweep.dir='):
            sweep_dir = arg[len('hydra.sweep.dir='):]
            sweep_dir_arg_idx = idx
    exp_run_dir, arg_dix, arg_name = (
        (sweep_dir, sweep_dir_arg_idx, 'hydra.sweep.dir')
        if '--multirun' in optimum_benchmark_args
        else (run_dir, run_dir_arg_idx, 'hydra.run.dir')
    )

    if exp_run_dir is None and args.ensure_empty:
        exp_run_dir = '_benchmark'
    if args.ensure_empty:
        os.makedirs(exp_run_dir, exist_ok=True)
        exp_run_dir = tempfile.mkdtemp(dir=exp_run_dir)

    run_summaries = []
    for commit in commits:
        with checkout_commit(repo, commit):
            commit = str(repo.head.commit)
            commit_run_dir = exp_run_dir
            if exp_run_dir is not None:
                commit_run_dir = os.path.join(exp_run_dir, f'commit\\={commit}')
            print(f'Run benchmark on commit: {commit}')
            for model in models:
                model_arg = [f'backend.model={model}'] if model != '' else []
                dir_args = []
                if commit_run_dir is not None:
                    if arg_dix > -1:
                        optimum_benchmark_args[arg_dix] = f'{arg_name}={commit_run_dir}'
                    else:
                        dir_args = [
                            f'hydra.sweep.dir={commit_run_dir}',
                            f'hydra.run.dir={commit_run_dir}/' + '${hydra.job.override_dirname}',
                        ]
                main(args.config_dir, args.config_name, model_arg + dir_args + optimum_benchmark_args)
            if commit_run_dir is not None:
                summaries = summarize(commit_run_dir.replace('\\', ''), metrics)
                run_summaries.extend(summaries)

    if exp_run_dir is not None:
        with open(os.path.join(exp_run_dir, 'summaries.json'), 'w') as fp:
            json.dump(run_summaries, fp, indent=4)
        combined_summary = combine_summaries(run_summaries)
        if args.repo_id is not None and args.path_in_repo is not None:
            api = HfApi()
            api.upload_folder(
                folder_path=exp_run_dir,
                path_in_repo=args.path_in_repo,
                repo_id=args.repo_id,
                repo_type='dataset',
                token=args.token,
            )


# File: transformers-main/benchmark/optimum_benchmark_wrapper.py
import argparse
import subprocess


def main(config_dir, config_name, args):
    # Forward the config plus any extra hydra overrides to the `optimum-benchmark` CLI,
    # with hydra's own logging disabled.
    subprocess.run(
        ['optimum-benchmark', '--config-dir', f'{config_dir}', '--config-name', f'{config_name}']
        + ['hydra/job_logging=disabled', 'hydra/hydra_logging=disabled']
        + args
    )


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-dir', type=str, required=True, help='The path to the config directory.')
    parser.add_argument('--config-name', type=str, required=True, help='The config name.')
    args, unknown = parser.parse_known_args()
    main(args.config_dir, args.config_name, unknown)
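# Usage sketch (not part of the repository files above, only an assumption about how the
# helpers could be driven by hand): re-summarize an existing optimum-benchmark run directory
# without launching new runs. The path below is hypothetical; it assumes the
# `<run_dir>/commit=<sha>/.../benchmark_report.json` layout produced by benchmark.py, and
# that this is executed from inside the `benchmark/` directory so that `benchmark.py`
# imports as the `benchmark` module.
from benchmark import summarize

metrics = ['prefill.latency.mean', 'decode.latency.mean', 'decode.throughput.value']
summaries = summarize('_benchmark/tmp1234/commit=abcdef0', metrics, expand_metrics=False)
for s in summaries:
    print(s['model'], s['config'], s['metrics'])
# Note: combine_summaries() writes into the module-level `exp_run_dir`, which is only defined
# in the `__main__` block of benchmark.py, so it is not usable standalone like this.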
# File: transformers-main/src/transformers/__init__.py
__version__ = '4.45.0.dev0'

from typing import TYPE_CHECKING

from . import dependency_versions_check
from .utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_bitsandbytes_available,
    is_essentia_available,
    is_flax_available,
    is_g2p_en_available,
    is_keras_nlp_available,
    is_librosa_available,
    is_pretty_midi_available,
    is_scipy_available,
    is_sentencepiece_available,
    is_speech_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_timm_available,
    is_tokenizers_available,
    is_torch_available,
    is_torchaudio_available,
    is_torchvision_available,
    is_vision_available,
    logging,
)

logger = logging.get_logger(__name__)

# Lazy-import structure: maps each submodule to the public names it exposes. The try/except
# blocks further below extend it depending on which optional dependencies are installed.
_import_structure = {'agents': ['Agent', 'CodeAgent', 'HfApiEngine', 'ManagedAgent', 'PipelineTool', 'ReactAgent', 'ReactCodeAgent', 'ReactJsonAgent', 'Tool', 'Toolbox', 'ToolCollection', 'TransformersEngine', 'launch_gradio_demo', 'load_tool', 'stream_to_gradio'], 'audio_utils': [], 'benchmark': [], 'commands': [], 'configuration_utils': ['PretrainedConfig'], 'convert_graph_to_onnx': [], 'convert_slow_tokenizers_checkpoints_to_fast': [], 'convert_tf_hub_seq_to_seq_bert_to_pytorch': [], 'data': ['DataProcessor', 'InputExample', 'InputFeatures', 'SingleSentenceClassificationProcessor', 'SquadExample', 'SquadFeatures', 'SquadV1Processor', 'SquadV2Processor', 'glue_compute_metrics', 'glue_convert_examples_to_features', 'glue_output_modes', 'glue_processors', 'glue_tasks_num_labels', 'squad_convert_examples_to_features', 'xnli_compute_metrics', 'xnli_output_modes', 'xnli_processors', 'xnli_tasks_num_labels'], 'data.data_collator': ['DataCollator', 'DataCollatorForLanguageModeling', 'DataCollatorForPermutationLanguageModeling', 'DataCollatorForSeq2Seq', 'DataCollatorForSOP', 'DataCollatorForTokenClassification', 'DataCollatorForWholeWordMask', 'DataCollatorWithFlattening', 'DataCollatorWithPadding', 'DefaultDataCollator', 'default_data_collator'], 'data.metrics': [], 'data.processors': [], 'debug_utils': [], 'deepspeed': [], 'dependency_versions_check': [], 'dependency_versions_table': [], 'dynamic_module_utils': [], 'feature_extraction_sequence_utils': ['SequenceFeatureExtractor'], 'feature_extraction_utils': ['BatchFeature', 'FeatureExtractionMixin'], 'file_utils': [], 'generation': ['GenerationConfig', 'TextIteratorStreamer', 'TextStreamer', 'WatermarkingConfig'], 'hf_argparser': ['HfArgumentParser'], 'hyperparameter_search': [], 'image_transforms': [], 'integrations': ['is_clearml_available', 'is_comet_available', 'is_dvclive_available', 'is_neptune_available', 'is_optuna_available', 'is_ray_available', 'is_ray_tune_available', 'is_sigopt_available', 'is_tensorboard_available', 'is_wandb_available'], 'modelcard': ['ModelCard'], 'modeling_tf_pytorch_utils': ['convert_tf_weight_name_to_pt_weight_name', 'load_pytorch_checkpoint_in_tf2_model', 'load_pytorch_model_in_tf2_model', 'load_pytorch_weights_in_tf2_model', 'load_tf2_checkpoint_in_pytorch_model', 'load_tf2_model_in_pytorch_model', 'load_tf2_weights_in_pytorch_model'], 'models': [], 'models.albert': ['AlbertConfig'], 'models.align': ['AlignConfig', 'AlignProcessor', 'AlignTextConfig',
'AlignVisionConfig'], 'models.altclip': ['AltCLIPConfig', 'AltCLIPProcessor', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'], 'models.audio_spectrogram_transformer': ['ASTConfig', 'ASTFeatureExtractor'], 'models.auto': ['CONFIG_MAPPING', 'FEATURE_EXTRACTOR_MAPPING', 'IMAGE_PROCESSOR_MAPPING', 'MODEL_NAMES_MAPPING', 'PROCESSOR_MAPPING', 'TOKENIZER_MAPPING', 'AutoConfig', 'AutoFeatureExtractor', 'AutoImageProcessor', 'AutoProcessor', 'AutoTokenizer'], 'models.autoformer': ['AutoformerConfig'], 'models.bark': ['BarkCoarseConfig', 'BarkConfig', 'BarkFineConfig', 'BarkProcessor', 'BarkSemanticConfig'], 'models.bart': ['BartConfig', 'BartTokenizer'], 'models.barthez': [], 'models.bartpho': [], 'models.beit': ['BeitConfig'], 'models.bert': ['BasicTokenizer', 'BertConfig', 'BertTokenizer', 'WordpieceTokenizer'], 'models.bert_generation': ['BertGenerationConfig'], 'models.bert_japanese': ['BertJapaneseTokenizer', 'CharacterTokenizer', 'MecabTokenizer'], 'models.bertweet': ['BertweetTokenizer'], 'models.big_bird': ['BigBirdConfig'], 'models.bigbird_pegasus': ['BigBirdPegasusConfig'], 'models.biogpt': ['BioGptConfig', 'BioGptTokenizer'], 'models.bit': ['BitConfig'], 'models.blenderbot': ['BlenderbotConfig', 'BlenderbotTokenizer'], 'models.blenderbot_small': ['BlenderbotSmallConfig', 'BlenderbotSmallTokenizer'], 'models.blip': ['BlipConfig', 'BlipProcessor', 'BlipTextConfig', 'BlipVisionConfig'], 'models.blip_2': ['Blip2Config', 'Blip2Processor', 'Blip2QFormerConfig', 'Blip2VisionConfig'], 'models.bloom': ['BloomConfig'], 'models.bridgetower': ['BridgeTowerConfig', 'BridgeTowerProcessor', 'BridgeTowerTextConfig', 'BridgeTowerVisionConfig'], 'models.bros': ['BrosConfig', 'BrosProcessor'], 'models.byt5': ['ByT5Tokenizer'], 'models.camembert': ['CamembertConfig'], 'models.canine': ['CanineConfig', 'CanineTokenizer'], 'models.chameleon': ['ChameleonConfig', 'ChameleonProcessor', 'ChameleonVQVAEConfig'], 'models.chinese_clip': ['ChineseCLIPConfig', 'ChineseCLIPProcessor', 'ChineseCLIPTextConfig', 'ChineseCLIPVisionConfig'], 'models.clap': ['ClapAudioConfig', 'ClapConfig', 'ClapProcessor', 'ClapTextConfig'], 'models.clip': ['CLIPConfig', 'CLIPProcessor', 'CLIPTextConfig', 'CLIPTokenizer', 'CLIPVisionConfig'], 'models.clipseg': ['CLIPSegConfig', 'CLIPSegProcessor', 'CLIPSegTextConfig', 'CLIPSegVisionConfig'], 'models.clvp': ['ClvpConfig', 'ClvpDecoderConfig', 'ClvpEncoderConfig', 'ClvpFeatureExtractor', 'ClvpProcessor', 'ClvpTokenizer'], 'models.code_llama': [], 'models.codegen': ['CodeGenConfig', 'CodeGenTokenizer'], 'models.cohere': ['CohereConfig'], 'models.conditional_detr': ['ConditionalDetrConfig'], 'models.convbert': ['ConvBertConfig', 'ConvBertTokenizer'], 'models.convnext': ['ConvNextConfig'], 'models.convnextv2': ['ConvNextV2Config'], 'models.cpm': [], 'models.cpmant': ['CpmAntConfig', 'CpmAntTokenizer'], 'models.ctrl': ['CTRLConfig', 'CTRLTokenizer'], 'models.cvt': ['CvtConfig'], 'models.dac': ['DacConfig', 'DacFeatureExtractor'], 'models.data2vec': ['Data2VecAudioConfig', 'Data2VecTextConfig', 'Data2VecVisionConfig'], 'models.dbrx': ['DbrxConfig'], 'models.deberta': ['DebertaConfig', 'DebertaTokenizer'], 'models.deberta_v2': ['DebertaV2Config'], 'models.decision_transformer': ['DecisionTransformerConfig'], 'models.deformable_detr': ['DeformableDetrConfig'], 'models.deit': ['DeiTConfig'], 'models.deprecated': [], 'models.deprecated.bort': [], 'models.deprecated.deta': ['DetaConfig'], 'models.deprecated.efficientformer': ['EfficientFormerConfig'], 'models.deprecated.ernie_m': ['ErnieMConfig'], 
'models.deprecated.gptsan_japanese': ['GPTSanJapaneseConfig', 'GPTSanJapaneseTokenizer'], 'models.deprecated.graphormer': ['GraphormerConfig'], 'models.deprecated.jukebox': ['JukeboxConfig', 'JukeboxPriorConfig', 'JukeboxTokenizer', 'JukeboxVQVAEConfig'], 'models.deprecated.mctct': ['MCTCTConfig', 'MCTCTFeatureExtractor', 'MCTCTProcessor'], 'models.deprecated.mega': ['MegaConfig'], 'models.deprecated.mmbt': ['MMBTConfig'], 'models.deprecated.nat': ['NatConfig'], 'models.deprecated.nezha': ['NezhaConfig'], 'models.deprecated.open_llama': ['OpenLlamaConfig'], 'models.deprecated.qdqbert': ['QDQBertConfig'], 'models.deprecated.realm': ['RealmConfig', 'RealmTokenizer'], 'models.deprecated.retribert': ['RetriBertConfig', 'RetriBertTokenizer'], 'models.deprecated.speech_to_text_2': ['Speech2Text2Config', 'Speech2Text2Processor', 'Speech2Text2Tokenizer'], 'models.deprecated.tapex': ['TapexTokenizer'], 'models.deprecated.trajectory_transformer': ['TrajectoryTransformerConfig'], 'models.deprecated.transfo_xl': ['TransfoXLConfig', 'TransfoXLCorpus', 'TransfoXLTokenizer'], 'models.deprecated.tvlt': ['TvltConfig', 'TvltFeatureExtractor', 'TvltProcessor'], 'models.deprecated.van': ['VanConfig'], 'models.deprecated.vit_hybrid': ['ViTHybridConfig'], 'models.deprecated.xlm_prophetnet': ['XLMProphetNetConfig'], 'models.depth_anything': ['DepthAnythingConfig'], 'models.detr': ['DetrConfig'], 'models.dialogpt': [], 'models.dinat': ['DinatConfig'], 'models.dinov2': ['Dinov2Config'], 'models.distilbert': ['DistilBertConfig', 'DistilBertTokenizer'], 'models.dit': [], 'models.donut': ['DonutProcessor', 'DonutSwinConfig'], 'models.dpr': ['DPRConfig', 'DPRContextEncoderTokenizer', 'DPRQuestionEncoderTokenizer', 'DPRReaderOutput', 'DPRReaderTokenizer'], 'models.dpt': ['DPTConfig'], 'models.efficientnet': ['EfficientNetConfig'], 'models.electra': ['ElectraConfig', 'ElectraTokenizer'], 'models.encodec': ['EncodecConfig', 'EncodecFeatureExtractor'], 'models.encoder_decoder': ['EncoderDecoderConfig'], 'models.ernie': ['ErnieConfig'], 'models.esm': ['EsmConfig', 'EsmTokenizer'], 'models.falcon': ['FalconConfig'], 'models.falcon_mamba': ['FalconMambaConfig'], 'models.fastspeech2_conformer': ['FastSpeech2ConformerConfig', 'FastSpeech2ConformerHifiGanConfig', 'FastSpeech2ConformerTokenizer', 'FastSpeech2ConformerWithHifiGanConfig'], 'models.flaubert': ['FlaubertConfig', 'FlaubertTokenizer'], 'models.flava': ['FlavaConfig', 'FlavaImageCodebookConfig', 'FlavaImageConfig', 'FlavaMultimodalConfig', 'FlavaTextConfig'], 'models.fnet': ['FNetConfig'], 'models.focalnet': ['FocalNetConfig'], 'models.fsmt': ['FSMTConfig', 'FSMTTokenizer'], 'models.funnel': ['FunnelConfig', 'FunnelTokenizer'], 'models.fuyu': ['FuyuConfig'], 'models.gemma': ['GemmaConfig'], 'models.gemma2': ['Gemma2Config'], 'models.git': ['GitConfig', 'GitProcessor', 'GitVisionConfig'], 'models.glpn': ['GLPNConfig'], 'models.gpt2': ['GPT2Config', 'GPT2Tokenizer'], 'models.gpt_bigcode': ['GPTBigCodeConfig'], 'models.gpt_neo': ['GPTNeoConfig'], 'models.gpt_neox': ['GPTNeoXConfig'], 'models.gpt_neox_japanese': ['GPTNeoXJapaneseConfig'], 'models.gpt_sw3': [], 'models.gptj': ['GPTJConfig'], 'models.granite': ['GraniteConfig'], 'models.grounding_dino': ['GroundingDinoConfig', 'GroundingDinoProcessor'], 'models.groupvit': ['GroupViTConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig'], 'models.herbert': ['HerbertTokenizer'], 'models.hiera': ['HieraConfig'], 'models.hubert': ['HubertConfig'], 'models.ibert': ['IBertConfig'], 'models.idefics': ['IdeficsConfig'], 
'models.idefics2': ['Idefics2Config'], 'models.imagegpt': ['ImageGPTConfig'], 'models.informer': ['InformerConfig'], 'models.instructblip': ['InstructBlipConfig', 'InstructBlipProcessor', 'InstructBlipQFormerConfig', 'InstructBlipVisionConfig'], 'models.instructblipvideo': ['InstructBlipVideoConfig', 'InstructBlipVideoProcessor', 'InstructBlipVideoQFormerConfig', 'InstructBlipVideoVisionConfig'], 'models.jamba': ['JambaConfig'], 'models.jetmoe': ['JetMoeConfig'], 'models.kosmos2': ['Kosmos2Config', 'Kosmos2Processor'], 'models.layoutlm': ['LayoutLMConfig', 'LayoutLMTokenizer'], 'models.layoutlmv2': ['LayoutLMv2Config', 'LayoutLMv2FeatureExtractor', 'LayoutLMv2ImageProcessor', 'LayoutLMv2Processor', 'LayoutLMv2Tokenizer'], 'models.layoutlmv3': ['LayoutLMv3Config', 'LayoutLMv3FeatureExtractor', 'LayoutLMv3ImageProcessor', 'LayoutLMv3Processor', 'LayoutLMv3Tokenizer'], 'models.layoutxlm': ['LayoutXLMProcessor'], 'models.led': ['LEDConfig', 'LEDTokenizer'], 'models.levit': ['LevitConfig'], 'models.lilt': ['LiltConfig'], 'models.llama': ['LlamaConfig'], 'models.llava': ['LlavaConfig', 'LlavaProcessor'], 'models.llava_next': ['LlavaNextConfig', 'LlavaNextProcessor'], 'models.llava_next_video': ['LlavaNextVideoConfig', 'LlavaNextVideoProcessor'], 'models.llava_onevision': ['LlavaOnevisionConfig', 'LlavaOnevisionProcessor'], 'models.longformer': ['LongformerConfig', 'LongformerTokenizer'], 'models.longt5': ['LongT5Config'], 'models.luke': ['LukeConfig', 'LukeTokenizer'], 'models.lxmert': ['LxmertConfig', 'LxmertTokenizer'], 'models.m2m_100': ['M2M100Config'], 'models.mamba': ['MambaConfig'], 'models.mamba2': ['Mamba2Config'], 'models.marian': ['MarianConfig'], 'models.markuplm': ['MarkupLMConfig', 'MarkupLMFeatureExtractor', 'MarkupLMProcessor', 'MarkupLMTokenizer'], 'models.mask2former': ['Mask2FormerConfig'], 'models.maskformer': ['MaskFormerConfig', 'MaskFormerSwinConfig'], 'models.mbart': ['MBartConfig'], 'models.mbart50': [], 'models.megatron_bert': ['MegatronBertConfig'], 'models.megatron_gpt2': [], 'models.mgp_str': ['MgpstrConfig', 'MgpstrProcessor', 'MgpstrTokenizer'], 'models.mistral': ['MistralConfig'], 'models.mixtral': ['MixtralConfig'], 'models.mluke': [], 'models.mobilebert': ['MobileBertConfig', 'MobileBertTokenizer'], 'models.mobilenet_v1': ['MobileNetV1Config'], 'models.mobilenet_v2': ['MobileNetV2Config'], 'models.mobilevit': ['MobileViTConfig'], 'models.mobilevitv2': ['MobileViTV2Config'], 'models.mpnet': ['MPNetConfig', 'MPNetTokenizer'], 'models.mpt': ['MptConfig'], 'models.mra': ['MraConfig'], 'models.mt5': ['MT5Config'], 'models.musicgen': ['MusicgenConfig', 'MusicgenDecoderConfig'], 'models.musicgen_melody': ['MusicgenMelodyConfig', 'MusicgenMelodyDecoderConfig'], 'models.mvp': ['MvpConfig', 'MvpTokenizer'], 'models.nemotron': ['NemotronConfig'], 'models.nllb': [], 'models.nllb_moe': ['NllbMoeConfig'], 'models.nougat': ['NougatProcessor'], 'models.nystromformer': ['NystromformerConfig'], 'models.olmo': ['OlmoConfig'], 'models.olmoe': ['OlmoeConfig'], 'models.oneformer': ['OneFormerConfig', 'OneFormerProcessor'], 'models.openai': ['OpenAIGPTConfig', 'OpenAIGPTTokenizer'], 'models.opt': ['OPTConfig'], 'models.owlv2': ['Owlv2Config', 'Owlv2Processor', 'Owlv2TextConfig', 'Owlv2VisionConfig'], 'models.owlvit': ['OwlViTConfig', 'OwlViTProcessor', 'OwlViTTextConfig', 'OwlViTVisionConfig'], 'models.paligemma': ['PaliGemmaConfig'], 'models.patchtsmixer': ['PatchTSMixerConfig'], 'models.patchtst': ['PatchTSTConfig'], 'models.pegasus': ['PegasusConfig', 'PegasusTokenizer'], 
'models.pegasus_x': ['PegasusXConfig'], 'models.perceiver': ['PerceiverConfig', 'PerceiverTokenizer'], 'models.persimmon': ['PersimmonConfig'], 'models.phi': ['PhiConfig'], 'models.phi3': ['Phi3Config'], 'models.phobert': ['PhobertTokenizer'], 'models.pix2struct': ['Pix2StructConfig', 'Pix2StructProcessor', 'Pix2StructTextConfig', 'Pix2StructVisionConfig'], 'models.pixtral': ['PixtralProcessor', 'PixtralVisionConfig'], 'models.plbart': ['PLBartConfig'], 'models.poolformer': ['PoolFormerConfig'], 'models.pop2piano': ['Pop2PianoConfig'], 'models.prophetnet': ['ProphetNetConfig', 'ProphetNetTokenizer'], 'models.pvt': ['PvtConfig'], 'models.pvt_v2': ['PvtV2Config'], 'models.qwen2': ['Qwen2Config', 'Qwen2Tokenizer'], 'models.qwen2_audio': ['Qwen2AudioConfig', 'Qwen2AudioEncoderConfig', 'Qwen2AudioProcessor'], 'models.qwen2_moe': ['Qwen2MoeConfig'], 'models.qwen2_vl': ['Qwen2VLConfig', 'Qwen2VLProcessor'], 'models.rag': ['RagConfig', 'RagRetriever', 'RagTokenizer'], 'models.recurrent_gemma': ['RecurrentGemmaConfig'], 'models.reformer': ['ReformerConfig'], 'models.regnet': ['RegNetConfig'], 'models.rembert': ['RemBertConfig'], 'models.resnet': ['ResNetConfig'], 'models.roberta': ['RobertaConfig', 'RobertaTokenizer'], 'models.roberta_prelayernorm': ['RobertaPreLayerNormConfig'], 'models.roc_bert': ['RoCBertConfig', 'RoCBertTokenizer'], 'models.roformer': ['RoFormerConfig', 'RoFormerTokenizer'], 'models.rt_detr': ['RTDetrConfig', 'RTDetrResNetConfig'], 'models.rwkv': ['RwkvConfig'], 'models.sam': ['SamConfig', 'SamMaskDecoderConfig', 'SamProcessor', 'SamPromptEncoderConfig', 'SamVisionConfig'], 'models.seamless_m4t': ['SeamlessM4TConfig', 'SeamlessM4TFeatureExtractor', 'SeamlessM4TProcessor'], 'models.seamless_m4t_v2': ['SeamlessM4Tv2Config'], 'models.segformer': ['SegformerConfig'], 'models.seggpt': ['SegGptConfig'], 'models.sew': ['SEWConfig'], 'models.sew_d': ['SEWDConfig'], 'models.siglip': ['SiglipConfig', 'SiglipProcessor', 'SiglipTextConfig', 'SiglipVisionConfig'], 'models.speech_encoder_decoder': ['SpeechEncoderDecoderConfig'], 'models.speech_to_text': ['Speech2TextConfig', 'Speech2TextFeatureExtractor', 'Speech2TextProcessor'], 'models.speecht5': ['SpeechT5Config', 'SpeechT5FeatureExtractor', 'SpeechT5HifiGanConfig', 'SpeechT5Processor'], 'models.splinter': ['SplinterConfig', 'SplinterTokenizer'], 'models.squeezebert': ['SqueezeBertConfig', 'SqueezeBertTokenizer'], 'models.stablelm': ['StableLmConfig'], 'models.starcoder2': ['Starcoder2Config'], 'models.superpoint': ['SuperPointConfig'], 'models.swiftformer': ['SwiftFormerConfig'], 'models.swin': ['SwinConfig'], 'models.swin2sr': ['Swin2SRConfig'], 'models.swinv2': ['Swinv2Config'], 'models.switch_transformers': ['SwitchTransformersConfig'], 'models.t5': ['T5Config'], 'models.table_transformer': ['TableTransformerConfig'], 'models.tapas': ['TapasConfig', 'TapasTokenizer'], 'models.time_series_transformer': ['TimeSeriesTransformerConfig'], 'models.timesformer': ['TimesformerConfig'], 'models.timm_backbone': ['TimmBackboneConfig'], 'models.trocr': ['TrOCRConfig', 'TrOCRProcessor'], 'models.tvp': ['TvpConfig', 'TvpProcessor'], 'models.udop': ['UdopConfig', 'UdopProcessor'], 'models.umt5': ['UMT5Config'], 'models.unispeech': ['UniSpeechConfig'], 'models.unispeech_sat': ['UniSpeechSatConfig'], 'models.univnet': ['UnivNetConfig', 'UnivNetFeatureExtractor'], 'models.upernet': ['UperNetConfig'], 'models.video_llava': ['VideoLlavaConfig'], 'models.videomae': ['VideoMAEConfig'], 'models.vilt': ['ViltConfig', 'ViltFeatureExtractor', 
'ViltImageProcessor', 'ViltProcessor'], 'models.vipllava': ['VipLlavaConfig'], 'models.vision_encoder_decoder': ['VisionEncoderDecoderConfig'], 'models.vision_text_dual_encoder': ['VisionTextDualEncoderConfig', 'VisionTextDualEncoderProcessor'], 'models.visual_bert': ['VisualBertConfig'], 'models.vit': ['ViTConfig'], 'models.vit_mae': ['ViTMAEConfig'], 'models.vit_msn': ['ViTMSNConfig'], 'models.vitdet': ['VitDetConfig'], 'models.vitmatte': ['VitMatteConfig'], 'models.vits': ['VitsConfig', 'VitsTokenizer'], 'models.vivit': ['VivitConfig'], 'models.wav2vec2': ['Wav2Vec2Config', 'Wav2Vec2CTCTokenizer', 'Wav2Vec2FeatureExtractor', 'Wav2Vec2Processor', 'Wav2Vec2Tokenizer'], 'models.wav2vec2_bert': ['Wav2Vec2BertConfig', 'Wav2Vec2BertProcessor'], 'models.wav2vec2_conformer': ['Wav2Vec2ConformerConfig'], 'models.wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer'], 'models.wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM'], 'models.wavlm': ['WavLMConfig'], 'models.whisper': ['WhisperConfig', 'WhisperFeatureExtractor', 'WhisperProcessor', 'WhisperTokenizer'], 'models.x_clip': ['XCLIPConfig', 'XCLIPProcessor', 'XCLIPTextConfig', 'XCLIPVisionConfig'], 'models.xglm': ['XGLMConfig'], 'models.xlm': ['XLMConfig', 'XLMTokenizer'], 'models.xlm_roberta': ['XLMRobertaConfig'], 'models.xlm_roberta_xl': ['XLMRobertaXLConfig'], 'models.xlnet': ['XLNetConfig'], 'models.xmod': ['XmodConfig'], 'models.yolos': ['YolosConfig'], 'models.yoso': ['YosoConfig'], 'models.zoedepth': ['ZoeDepthConfig'], 'onnx': [], 'pipelines': ['AudioClassificationPipeline', 'AutomaticSpeechRecognitionPipeline', 'CsvPipelineDataFormat', 'DepthEstimationPipeline', 'DocumentQuestionAnsweringPipeline', 'FeatureExtractionPipeline', 'FillMaskPipeline', 'ImageClassificationPipeline', 'ImageFeatureExtractionPipeline', 'ImageSegmentationPipeline', 'ImageToImagePipeline', 'ImageToTextPipeline', 'JsonPipelineDataFormat', 'MaskGenerationPipeline', 'NerPipeline', 'ObjectDetectionPipeline', 'PipedPipelineDataFormat', 'Pipeline', 'PipelineDataFormat', 'QuestionAnsweringPipeline', 'SummarizationPipeline', 'TableQuestionAnsweringPipeline', 'Text2TextGenerationPipeline', 'TextClassificationPipeline', 'TextGenerationPipeline', 'TextToAudioPipeline', 'TokenClassificationPipeline', 'TranslationPipeline', 'VideoClassificationPipeline', 'VisualQuestionAnsweringPipeline', 'ZeroShotAudioClassificationPipeline', 'ZeroShotClassificationPipeline', 'ZeroShotImageClassificationPipeline', 'ZeroShotObjectDetectionPipeline', 'pipeline'], 'processing_utils': ['ProcessorMixin'], 'quantizers': [], 'testing_utils': [], 'tokenization_utils': ['PreTrainedTokenizer'], 'tokenization_utils_base': ['AddedToken', 'BatchEncoding', 'CharSpan', 'PreTrainedTokenizerBase', 'SpecialTokensMixin', 'TokenSpan'], 'trainer_callback': ['DefaultFlowCallback', 'EarlyStoppingCallback', 'PrinterCallback', 'ProgressCallback', 'TrainerCallback', 'TrainerControl', 'TrainerState'], 'trainer_utils': ['EvalPrediction', 'IntervalStrategy', 'SchedulerType', 'enable_full_determinism', 'set_seed'], 'training_args': ['TrainingArguments'], 'training_args_seq2seq': ['Seq2SeqTrainingArguments'], 'training_args_tf': ['TFTrainingArguments'], 'utils': ['CONFIG_NAME', 'MODEL_CARD_NAME', 'PYTORCH_PRETRAINED_BERT_CACHE', 'PYTORCH_TRANSFORMERS_CACHE', 'SPIECE_UNDERLINE', 'TF2_WEIGHTS_NAME', 'TF_WEIGHTS_NAME', 'TRANSFORMERS_CACHE', 'WEIGHTS_NAME', 'TensorType', 'add_end_docstrings', 'add_start_docstrings', 'is_apex_available', 'is_av_available', 'is_bitsandbytes_available', 'is_datasets_available', 'is_decord_available', 
'is_faiss_available', 'is_flax_available', 'is_keras_nlp_available', 'is_phonemizer_available', 'is_psutil_available', 'is_py3nvml_available', 'is_pyctcdecode_available', 'is_sacremoses_available', 'is_safetensors_available', 'is_scipy_available', 'is_sentencepiece_available', 'is_sklearn_available', 'is_speech_available', 'is_tensorflow_text_available', 'is_tf_available', 'is_timm_available', 'is_tokenizers_available', 'is_torch_available', 'is_torch_mlu_available', 'is_torch_musa_available', 'is_torch_neuroncore_available', 'is_torch_npu_available', 'is_torch_tpu_available', 'is_torchvision_available', 'is_torch_xla_available', 'is_torch_xpu_available', 'is_vision_available', 'logging'], 'utils.quantization_config': ['AqlmConfig', 'AwqConfig', 'BitsAndBytesConfig', 'EetqConfig', 'FbgemmFp8Config', 'GPTQConfig', 'HqqConfig', 'QuantoConfig', 'TorchAoConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_sentencepiece_objects _import_structure['utils.dummy_sentencepiece_objects'] = [name for name in dir(dummy_sentencepiece_objects) if not name.startswith('_')] else: _import_structure['models.albert'].append('AlbertTokenizer') _import_structure['models.barthez'].append('BarthezTokenizer') _import_structure['models.bartpho'].append('BartphoTokenizer') _import_structure['models.bert_generation'].append('BertGenerationTokenizer') _import_structure['models.big_bird'].append('BigBirdTokenizer') _import_structure['models.camembert'].append('CamembertTokenizer') _import_structure['models.code_llama'].append('CodeLlamaTokenizer') _import_structure['models.cpm'].append('CpmTokenizer') _import_structure['models.deberta_v2'].append('DebertaV2Tokenizer') _import_structure['models.deprecated.ernie_m'].append('ErnieMTokenizer') _import_structure['models.deprecated.xlm_prophetnet'].append('XLMProphetNetTokenizer') _import_structure['models.fnet'].append('FNetTokenizer') _import_structure['models.gemma'].append('GemmaTokenizer') _import_structure['models.gpt_sw3'].append('GPTSw3Tokenizer') _import_structure['models.layoutxlm'].append('LayoutXLMTokenizer') _import_structure['models.llama'].append('LlamaTokenizer') _import_structure['models.m2m_100'].append('M2M100Tokenizer') _import_structure['models.marian'].append('MarianTokenizer') _import_structure['models.mbart'].append('MBartTokenizer') _import_structure['models.mbart50'].append('MBart50Tokenizer') _import_structure['models.mluke'].append('MLukeTokenizer') _import_structure['models.mt5'].append('MT5Tokenizer') _import_structure['models.nllb'].append('NllbTokenizer') _import_structure['models.pegasus'].append('PegasusTokenizer') _import_structure['models.plbart'].append('PLBartTokenizer') _import_structure['models.reformer'].append('ReformerTokenizer') _import_structure['models.rembert'].append('RemBertTokenizer') _import_structure['models.seamless_m4t'].append('SeamlessM4TTokenizer') _import_structure['models.siglip'].append('SiglipTokenizer') _import_structure['models.speech_to_text'].append('Speech2TextTokenizer') _import_structure['models.speecht5'].append('SpeechT5Tokenizer') _import_structure['models.t5'].append('T5Tokenizer') _import_structure['models.udop'].append('UdopTokenizer') _import_structure['models.xglm'].append('XGLMTokenizer') _import_structure['models.xlm_roberta'].append('XLMRobertaTokenizer') _import_structure['models.xlnet'].append('XLNetTokenizer') try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: from .utils import dummy_tokenizers_objects _import_structure['utils.dummy_tokenizers_objects'] = [name for name in dir(dummy_tokenizers_objects) if not name.startswith('_')] else: _import_structure['models.albert'].append('AlbertTokenizerFast') _import_structure['models.bart'].append('BartTokenizerFast') _import_structure['models.barthez'].append('BarthezTokenizerFast') _import_structure['models.bert'].append('BertTokenizerFast') _import_structure['models.big_bird'].append('BigBirdTokenizerFast') _import_structure['models.blenderbot'].append('BlenderbotTokenizerFast') _import_structure['models.blenderbot_small'].append('BlenderbotSmallTokenizerFast') _import_structure['models.bloom'].append('BloomTokenizerFast') _import_structure['models.camembert'].append('CamembertTokenizerFast') _import_structure['models.clip'].append('CLIPTokenizerFast') _import_structure['models.code_llama'].append('CodeLlamaTokenizerFast') _import_structure['models.codegen'].append('CodeGenTokenizerFast') _import_structure['models.cohere'].append('CohereTokenizerFast') _import_structure['models.convbert'].append('ConvBertTokenizerFast') _import_structure['models.cpm'].append('CpmTokenizerFast') _import_structure['models.deberta'].append('DebertaTokenizerFast') _import_structure['models.deberta_v2'].append('DebertaV2TokenizerFast') _import_structure['models.deprecated.realm'].append('RealmTokenizerFast') _import_structure['models.deprecated.retribert'].append('RetriBertTokenizerFast') _import_structure['models.distilbert'].append('DistilBertTokenizerFast') _import_structure['models.dpr'].extend(['DPRContextEncoderTokenizerFast', 'DPRQuestionEncoderTokenizerFast', 'DPRReaderTokenizerFast']) _import_structure['models.electra'].append('ElectraTokenizerFast') _import_structure['models.fnet'].append('FNetTokenizerFast') _import_structure['models.funnel'].append('FunnelTokenizerFast') _import_structure['models.gemma'].append('GemmaTokenizerFast') _import_structure['models.gpt2'].append('GPT2TokenizerFast') _import_structure['models.gpt_neox'].append('GPTNeoXTokenizerFast') _import_structure['models.gpt_neox_japanese'].append('GPTNeoXJapaneseTokenizer') _import_structure['models.herbert'].append('HerbertTokenizerFast') _import_structure['models.layoutlm'].append('LayoutLMTokenizerFast') _import_structure['models.layoutlmv2'].append('LayoutLMv2TokenizerFast') _import_structure['models.layoutlmv3'].append('LayoutLMv3TokenizerFast') _import_structure['models.layoutxlm'].append('LayoutXLMTokenizerFast') _import_structure['models.led'].append('LEDTokenizerFast') _import_structure['models.llama'].append('LlamaTokenizerFast') _import_structure['models.longformer'].append('LongformerTokenizerFast') _import_structure['models.lxmert'].append('LxmertTokenizerFast') _import_structure['models.markuplm'].append('MarkupLMTokenizerFast') _import_structure['models.mbart'].append('MBartTokenizerFast') _import_structure['models.mbart50'].append('MBart50TokenizerFast') _import_structure['models.mobilebert'].append('MobileBertTokenizerFast') _import_structure['models.mpnet'].append('MPNetTokenizerFast') _import_structure['models.mt5'].append('MT5TokenizerFast') _import_structure['models.mvp'].append('MvpTokenizerFast') _import_structure['models.nllb'].append('NllbTokenizerFast') _import_structure['models.nougat'].append('NougatTokenizerFast') _import_structure['models.openai'].append('OpenAIGPTTokenizerFast') _import_structure['models.pegasus'].append('PegasusTokenizerFast') 
_import_structure['models.qwen2'].append('Qwen2TokenizerFast') _import_structure['models.reformer'].append('ReformerTokenizerFast') _import_structure['models.rembert'].append('RemBertTokenizerFast') _import_structure['models.roberta'].append('RobertaTokenizerFast') _import_structure['models.roformer'].append('RoFormerTokenizerFast') _import_structure['models.seamless_m4t'].append('SeamlessM4TTokenizerFast') _import_structure['models.splinter'].append('SplinterTokenizerFast') _import_structure['models.squeezebert'].append('SqueezeBertTokenizerFast') _import_structure['models.t5'].append('T5TokenizerFast') _import_structure['models.udop'].append('UdopTokenizerFast') _import_structure['models.whisper'].append('WhisperTokenizerFast') _import_structure['models.xglm'].append('XGLMTokenizerFast') _import_structure['models.xlm_roberta'].append('XLMRobertaTokenizerFast') _import_structure['models.xlnet'].append('XLNetTokenizerFast') _import_structure['tokenization_utils_fast'] = ['PreTrainedTokenizerFast'] try: if not (is_sentencepiece_available() and is_tokenizers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_sentencepiece_and_tokenizers_objects _import_structure['utils.dummy_sentencepiece_and_tokenizers_objects'] = [name for name in dir(dummy_sentencepiece_and_tokenizers_objects) if not name.startswith('_')] else: _import_structure['convert_slow_tokenizer'] = ['SLOW_TO_FAST_CONVERTERS', 'convert_slow_tokenizer'] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_tensorflow_text_objects _import_structure['utils.dummy_tensorflow_text_objects'] = [name for name in dir(dummy_tensorflow_text_objects) if not name.startswith('_')] else: _import_structure['models.bert'].append('TFBertTokenizer') try: if not is_keras_nlp_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_keras_nlp_objects _import_structure['utils.dummy_keras_nlp_objects'] = [name for name in dir(dummy_keras_nlp_objects) if not name.startswith('_')] else: _import_structure['models.gpt2'].append('TFGPT2Tokenizer') try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_vision_objects _import_structure['utils.dummy_vision_objects'] = [name for name in dir(dummy_vision_objects) if not name.startswith('_')] else: _import_structure['image_processing_base'] = ['ImageProcessingMixin'] _import_structure['image_processing_utils'] = ['BaseImageProcessor'] _import_structure['image_utils'] = ['ImageFeatureExtractionMixin'] _import_structure['models.beit'].extend(['BeitFeatureExtractor', 'BeitImageProcessor']) _import_structure['models.bit'].extend(['BitImageProcessor']) _import_structure['models.blip'].extend(['BlipImageProcessor']) _import_structure['models.bridgetower'].append('BridgeTowerImageProcessor') _import_structure['models.chameleon'].append('ChameleonImageProcessor') _import_structure['models.chinese_clip'].extend(['ChineseCLIPFeatureExtractor', 'ChineseCLIPImageProcessor']) _import_structure['models.clip'].extend(['CLIPFeatureExtractor', 'CLIPImageProcessor']) _import_structure['models.conditional_detr'].extend(['ConditionalDetrFeatureExtractor', 'ConditionalDetrImageProcessor']) _import_structure['models.convnext'].extend(['ConvNextFeatureExtractor', 'ConvNextImageProcessor']) 
_import_structure['models.deformable_detr'].extend(['DeformableDetrFeatureExtractor', 'DeformableDetrImageProcessor']) _import_structure['models.deit'].extend(['DeiTFeatureExtractor', 'DeiTImageProcessor']) _import_structure['models.deprecated.deta'].append('DetaImageProcessor') _import_structure['models.deprecated.efficientformer'].append('EfficientFormerImageProcessor') _import_structure['models.deprecated.tvlt'].append('TvltImageProcessor') _import_structure['models.deprecated.vit_hybrid'].extend(['ViTHybridImageProcessor']) _import_structure['models.detr'].extend(['DetrFeatureExtractor', 'DetrImageProcessor']) _import_structure['models.donut'].extend(['DonutFeatureExtractor', 'DonutImageProcessor']) _import_structure['models.dpt'].extend(['DPTFeatureExtractor', 'DPTImageProcessor']) _import_structure['models.efficientnet'].append('EfficientNetImageProcessor') _import_structure['models.flava'].extend(['FlavaFeatureExtractor', 'FlavaImageProcessor', 'FlavaProcessor']) _import_structure['models.fuyu'].extend(['FuyuImageProcessor', 'FuyuProcessor']) _import_structure['models.glpn'].extend(['GLPNFeatureExtractor', 'GLPNImageProcessor']) _import_structure['models.grounding_dino'].extend(['GroundingDinoImageProcessor']) _import_structure['models.idefics'].extend(['IdeficsImageProcessor']) _import_structure['models.idefics2'].extend(['Idefics2ImageProcessor']) _import_structure['models.imagegpt'].extend(['ImageGPTFeatureExtractor', 'ImageGPTImageProcessor']) _import_structure['models.instructblipvideo'].extend(['InstructBlipVideoImageProcessor']) _import_structure['models.layoutlmv2'].extend(['LayoutLMv2FeatureExtractor', 'LayoutLMv2ImageProcessor']) _import_structure['models.layoutlmv3'].extend(['LayoutLMv3FeatureExtractor', 'LayoutLMv3ImageProcessor']) _import_structure['models.levit'].extend(['LevitFeatureExtractor', 'LevitImageProcessor']) _import_structure['models.llava_next'].append('LlavaNextImageProcessor') _import_structure['models.llava_next_video'].append('LlavaNextVideoImageProcessor') _import_structure['models.llava_onevision'].extend(['LlavaOnevisionImageProcessor', 'LlavaOnevisionVideoProcessor']) _import_structure['models.mask2former'].append('Mask2FormerImageProcessor') _import_structure['models.maskformer'].extend(['MaskFormerFeatureExtractor', 'MaskFormerImageProcessor']) _import_structure['models.mobilenet_v1'].extend(['MobileNetV1FeatureExtractor', 'MobileNetV1ImageProcessor']) _import_structure['models.mobilenet_v2'].extend(['MobileNetV2FeatureExtractor', 'MobileNetV2ImageProcessor']) _import_structure['models.mobilevit'].extend(['MobileViTFeatureExtractor', 'MobileViTImageProcessor']) _import_structure['models.nougat'].append('NougatImageProcessor') _import_structure['models.oneformer'].extend(['OneFormerImageProcessor']) _import_structure['models.owlv2'].append('Owlv2ImageProcessor') _import_structure['models.owlvit'].extend(['OwlViTFeatureExtractor', 'OwlViTImageProcessor']) _import_structure['models.perceiver'].extend(['PerceiverFeatureExtractor', 'PerceiverImageProcessor']) _import_structure['models.pix2struct'].extend(['Pix2StructImageProcessor']) _import_structure['models.pixtral'].append('PixtralImageProcessor') _import_structure['models.poolformer'].extend(['PoolFormerFeatureExtractor', 'PoolFormerImageProcessor']) _import_structure['models.pvt'].extend(['PvtImageProcessor']) _import_structure['models.qwen2_vl'].extend(['Qwen2VLImageProcessor']) _import_structure['models.rt_detr'].extend(['RTDetrImageProcessor']) 
_import_structure['models.sam'].extend(['SamImageProcessor']) _import_structure['models.segformer'].extend(['SegformerFeatureExtractor', 'SegformerImageProcessor']) _import_structure['models.seggpt'].extend(['SegGptImageProcessor']) _import_structure['models.siglip'].append('SiglipImageProcessor') _import_structure['models.superpoint'].extend(['SuperPointImageProcessor']) _import_structure['models.swin2sr'].append('Swin2SRImageProcessor') _import_structure['models.tvp'].append('TvpImageProcessor') _import_structure['models.video_llava'].append('VideoLlavaImageProcessor') _import_structure['models.videomae'].extend(['VideoMAEFeatureExtractor', 'VideoMAEImageProcessor']) _import_structure['models.vilt'].extend(['ViltFeatureExtractor', 'ViltImageProcessor', 'ViltProcessor']) _import_structure['models.vit'].extend(['ViTFeatureExtractor', 'ViTImageProcessor']) _import_structure['models.vitmatte'].append('VitMatteImageProcessor') _import_structure['models.vivit'].append('VivitImageProcessor') _import_structure['models.yolos'].extend(['YolosFeatureExtractor', 'YolosImageProcessor']) _import_structure['models.zoedepth'].append('ZoeDepthImageProcessor') try: if not is_torchvision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torchvision_objects _import_structure['utils.dummy_torchvision_objects'] = [name for name in dir(dummy_torchvision_objects) if not name.startswith('_')] else: _import_structure['image_processing_utils_fast'] = ['BaseImageProcessorFast'] _import_structure['models.vit'].append('ViTImageProcessorFast') try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_pt_objects _import_structure['utils.dummy_pt_objects'] = [name for name in dir(dummy_pt_objects) if not name.startswith('_')] else: _import_structure['activations'] = [] _import_structure['benchmark.benchmark'] = ['PyTorchBenchmark'] _import_structure['benchmark.benchmark_args'] = ['PyTorchBenchmarkArguments'] _import_structure['cache_utils'] = ['Cache', 'CacheConfig', 'DynamicCache', 'EncoderDecoderCache', 'HQQQuantizedCache', 'HybridCache', 'MambaCache', 'OffloadedCache', 'OffloadedStaticCache', 'QuantizedCache', 'QuantizedCacheConfig', 'QuantoQuantizedCache', 'SinkCache', 'SlidingWindowCache', 'StaticCache'] _import_structure['data.datasets'] = ['GlueDataset', 'GlueDataTrainingArguments', 'LineByLineTextDataset', 'LineByLineWithRefDataset', 'LineByLineWithSOPTextDataset', 'SquadDataset', 'SquadDataTrainingArguments', 'TextDataset', 'TextDatasetForNextSentencePrediction'] _import_structure['generation'].extend(['AlternatingCodebooksLogitsProcessor', 'BeamScorer', 'BeamSearchScorer', 'ClassifierFreeGuidanceLogitsProcessor', 'ConstrainedBeamSearchScorer', 'Constraint', 'ConstraintListState', 'DisjunctiveConstraint', 'EncoderNoRepeatNGramLogitsProcessor', 'EncoderRepetitionPenaltyLogitsProcessor', 'EosTokenCriteria', 'EpsilonLogitsWarper', 'EtaLogitsWarper', 'ExponentialDecayLengthPenalty', 'ForcedBOSTokenLogitsProcessor', 'ForcedEOSTokenLogitsProcessor', 'GenerationMixin', 'HammingDiversityLogitsProcessor', 'InfNanRemoveLogitsProcessor', 'LogitNormalization', 'LogitsProcessor', 'LogitsProcessorList', 'LogitsWarper', 'MaxLengthCriteria', 'MaxTimeCriteria', 'MinLengthLogitsProcessor', 'MinNewTokensLengthLogitsProcessor', 'MinPLogitsWarper', 'NoBadWordsLogitsProcessor', 'NoRepeatNGramLogitsProcessor', 'PhrasalConstraint', 'PrefixConstrainedLogitsProcessor', 
'RepetitionPenaltyLogitsProcessor', 'SequenceBiasLogitsProcessor', 'StoppingCriteria', 'StoppingCriteriaList', 'StopStringCriteria', 'SuppressTokensAtBeginLogitsProcessor', 'SuppressTokensLogitsProcessor', 'TemperatureLogitsWarper', 'TopKLogitsWarper', 'TopPLogitsWarper', 'TypicalLogitsWarper', 'UnbatchedClassifierFreeGuidanceLogitsProcessor', 'WatermarkDetector', 'WatermarkLogitsProcessor', 'WhisperTimeStampLogitsProcessor']) _import_structure['integrations.executorch'] = ['TorchExportableModuleWithStaticCache', 'convert_and_export_with_cache'] _import_structure['modeling_flash_attention_utils'] = [] _import_structure['modeling_outputs'] = [] _import_structure['modeling_rope_utils'] = ['ROPE_INIT_FUNCTIONS'] _import_structure['modeling_utils'] = ['PreTrainedModel'] _import_structure['models.albert'].extend(['AlbertForMaskedLM', 'AlbertForMultipleChoice', 'AlbertForPreTraining', 'AlbertForQuestionAnswering', 'AlbertForSequenceClassification', 'AlbertForTokenClassification', 'AlbertModel', 'AlbertPreTrainedModel', 'load_tf_weights_in_albert']) _import_structure['models.align'].extend(['AlignModel', 'AlignPreTrainedModel', 'AlignTextModel', 'AlignVisionModel']) _import_structure['models.altclip'].extend(['AltCLIPModel', 'AltCLIPPreTrainedModel', 'AltCLIPTextModel', 'AltCLIPVisionModel']) _import_structure['models.audio_spectrogram_transformer'].extend(['ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel']) _import_structure['models.auto'].extend(['MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING', 'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING', 'MODEL_FOR_BACKBONE_MAPPING', 'MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING', 'MODEL_FOR_CAUSAL_LM_MAPPING', 'MODEL_FOR_CTC_MAPPING', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING', 'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING', 'MODEL_FOR_IMAGE_MAPPING', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING', 'MODEL_FOR_IMAGE_TO_IMAGE_MAPPING', 'MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING', 'MODEL_FOR_KEYPOINT_DETECTION_MAPPING', 'MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING', 'MODEL_FOR_MASKED_LM_MAPPING', 'MODEL_FOR_MASK_GENERATION_MAPPING', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING', 'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING', 'MODEL_FOR_OBJECT_DETECTION_MAPPING', 'MODEL_FOR_PRETRAINING_MAPPING', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING', 'MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING', 'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING', 'MODEL_FOR_TEXT_ENCODING_MAPPING', 'MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING', 'MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING', 'MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING', 'MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING', 'MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING', 'MODEL_FOR_VISION_2_SEQ_MAPPING', 'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING', 'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING', 'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING', 'MODEL_MAPPING', 'MODEL_WITH_LM_HEAD_MAPPING', 'AutoBackbone', 'AutoModel', 'AutoModelForAudioClassification', 'AutoModelForAudioFrameClassification', 'AutoModelForAudioXVector', 'AutoModelForCausalLM', 'AutoModelForCTC', 'AutoModelForDepthEstimation', 'AutoModelForDocumentQuestionAnswering', 'AutoModelForImageClassification', 'AutoModelForImageSegmentation', 'AutoModelForImageToImage', 'AutoModelForInstanceSegmentation', 'AutoModelForKeypointDetection', 
'AutoModelForMaskedImageModeling', 'AutoModelForMaskedLM', 'AutoModelForMaskGeneration', 'AutoModelForMultipleChoice', 'AutoModelForNextSentencePrediction', 'AutoModelForObjectDetection', 'AutoModelForPreTraining', 'AutoModelForQuestionAnswering', 'AutoModelForSemanticSegmentation', 'AutoModelForSeq2SeqLM', 'AutoModelForSequenceClassification', 'AutoModelForSpeechSeq2Seq', 'AutoModelForTableQuestionAnswering', 'AutoModelForTextEncoding', 'AutoModelForTextToSpectrogram', 'AutoModelForTextToWaveform', 'AutoModelForTokenClassification', 'AutoModelForUniversalSegmentation', 'AutoModelForVideoClassification', 'AutoModelForVision2Seq', 'AutoModelForVisualQuestionAnswering', 'AutoModelForZeroShotImageClassification', 'AutoModelForZeroShotObjectDetection', 'AutoModelWithLMHead']) _import_structure['models.autoformer'].extend(['AutoformerForPrediction', 'AutoformerModel', 'AutoformerPreTrainedModel']) _import_structure['models.bark'].extend(['BarkCausalModel', 'BarkCoarseModel', 'BarkFineModel', 'BarkModel', 'BarkPreTrainedModel', 'BarkSemanticModel']) _import_structure['models.bart'].extend(['BartForCausalLM', 'BartForConditionalGeneration', 'BartForQuestionAnswering', 'BartForSequenceClassification', 'BartModel', 'BartPretrainedModel', 'BartPreTrainedModel', 'PretrainedBartModel']) _import_structure['models.beit'].extend(['BeitBackbone', 'BeitForImageClassification', 'BeitForMaskedImageModeling', 'BeitForSemanticSegmentation', 'BeitModel', 'BeitPreTrainedModel']) _import_structure['models.bert'].extend(['BertForMaskedLM', 'BertForMultipleChoice', 'BertForNextSentencePrediction', 'BertForPreTraining', 'BertForQuestionAnswering', 'BertForSequenceClassification', 'BertForTokenClassification', 'BertLMHeadModel', 'BertModel', 'BertPreTrainedModel', 'load_tf_weights_in_bert']) _import_structure['models.bert_generation'].extend(['BertGenerationDecoder', 'BertGenerationEncoder', 'BertGenerationPreTrainedModel', 'load_tf_weights_in_bert_generation']) _import_structure['models.big_bird'].extend(['BigBirdForCausalLM', 'BigBirdForMaskedLM', 'BigBirdForMultipleChoice', 'BigBirdForPreTraining', 'BigBirdForQuestionAnswering', 'BigBirdForSequenceClassification', 'BigBirdForTokenClassification', 'BigBirdModel', 'BigBirdPreTrainedModel', 'load_tf_weights_in_big_bird']) _import_structure['models.bigbird_pegasus'].extend(['BigBirdPegasusForCausalLM', 'BigBirdPegasusForConditionalGeneration', 'BigBirdPegasusForQuestionAnswering', 'BigBirdPegasusForSequenceClassification', 'BigBirdPegasusModel', 'BigBirdPegasusPreTrainedModel']) _import_structure['models.biogpt'].extend(['BioGptForCausalLM', 'BioGptForSequenceClassification', 'BioGptForTokenClassification', 'BioGptModel', 'BioGptPreTrainedModel']) _import_structure['models.bit'].extend(['BitBackbone', 'BitForImageClassification', 'BitModel', 'BitPreTrainedModel']) _import_structure['models.blenderbot'].extend(['BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel']) _import_structure['models.blenderbot_small'].extend(['BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel']) _import_structure['models.blip'].extend(['BlipForConditionalGeneration', 'BlipForImageTextRetrieval', 'BlipForQuestionAnswering', 'BlipModel', 'BlipPreTrainedModel', 'BlipTextModel', 'BlipVisionModel']) _import_structure['models.blip_2'].extend(['Blip2ForConditionalGeneration', 'Blip2ForImageTextRetrieval', 'Blip2Model', 'Blip2PreTrainedModel', 'Blip2QFormerModel', 
'Blip2TextModelWithProjection', 'Blip2VisionModel', 'Blip2VisionModelWithProjection']) _import_structure['models.bloom'].extend(['BloomForCausalLM', 'BloomForQuestionAnswering', 'BloomForSequenceClassification', 'BloomForTokenClassification', 'BloomModel', 'BloomPreTrainedModel']) _import_structure['models.bridgetower'].extend(['BridgeTowerForContrastiveLearning', 'BridgeTowerForImageAndTextRetrieval', 'BridgeTowerForMaskedLM', 'BridgeTowerModel', 'BridgeTowerPreTrainedModel']) _import_structure['models.bros'].extend(['BrosForTokenClassification', 'BrosModel', 'BrosPreTrainedModel', 'BrosProcessor', 'BrosSpadeEEForTokenClassification', 'BrosSpadeELForTokenClassification']) _import_structure['models.camembert'].extend(['CamembertForCausalLM', 'CamembertForMaskedLM', 'CamembertForMultipleChoice', 'CamembertForQuestionAnswering', 'CamembertForSequenceClassification', 'CamembertForTokenClassification', 'CamembertModel', 'CamembertPreTrainedModel']) _import_structure['models.canine'].extend(['CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine']) _import_structure['models.chameleon'].extend(['ChameleonForConditionalGeneration', 'ChameleonModel', 'ChameleonPreTrainedModel', 'ChameleonProcessor', 'ChameleonVQVAE']) _import_structure['models.chinese_clip'].extend(['ChineseCLIPModel', 'ChineseCLIPPreTrainedModel', 'ChineseCLIPTextModel', 'ChineseCLIPVisionModel']) _import_structure['models.clap'].extend(['ClapAudioModel', 'ClapAudioModelWithProjection', 'ClapFeatureExtractor', 'ClapModel', 'ClapPreTrainedModel', 'ClapTextModel', 'ClapTextModelWithProjection']) _import_structure['models.clip'].extend(['CLIPForImageClassification', 'CLIPModel', 'CLIPPreTrainedModel', 'CLIPTextModel', 'CLIPTextModelWithProjection', 'CLIPVisionModel', 'CLIPVisionModelWithProjection']) _import_structure['models.clipseg'].extend(['CLIPSegForImageSegmentation', 'CLIPSegModel', 'CLIPSegPreTrainedModel', 'CLIPSegTextModel', 'CLIPSegVisionModel']) _import_structure['models.clvp'].extend(['ClvpDecoder', 'ClvpEncoder', 'ClvpForCausalLM', 'ClvpModel', 'ClvpModelForConditionalGeneration', 'ClvpPreTrainedModel']) _import_structure['models.codegen'].extend(['CodeGenForCausalLM', 'CodeGenModel', 'CodeGenPreTrainedModel']) _import_structure['models.cohere'].extend(['CohereForCausalLM', 'CohereModel', 'CoherePreTrainedModel']) _import_structure['models.conditional_detr'].extend(['ConditionalDetrForObjectDetection', 'ConditionalDetrForSegmentation', 'ConditionalDetrModel', 'ConditionalDetrPreTrainedModel']) _import_structure['models.convbert'].extend(['ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert']) _import_structure['models.convnext'].extend(['ConvNextBackbone', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel']) _import_structure['models.convnextv2'].extend(['ConvNextV2Backbone', 'ConvNextV2ForImageClassification', 'ConvNextV2Model', 'ConvNextV2PreTrainedModel']) _import_structure['models.cpmant'].extend(['CpmAntForCausalLM', 'CpmAntModel', 'CpmAntPreTrainedModel']) _import_structure['models.ctrl'].extend(['CTRLForSequenceClassification', 'CTRLLMHeadModel', 'CTRLModel', 'CTRLPreTrainedModel']) _import_structure['models.cvt'].extend(['CvtForImageClassification', 'CvtModel', 
'CvtPreTrainedModel']) _import_structure['models.dac'].extend(['DacModel', 'DacPreTrainedModel']) _import_structure['models.data2vec'].extend(['Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', 'Data2VecVisionForImageClassification', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel']) _import_structure['models.dbrx'].extend(['DbrxForCausalLM', 'DbrxModel', 'DbrxPreTrainedModel']) _import_structure['models.deberta'].extend(['DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel']) _import_structure['models.deberta_v2'].extend(['DebertaV2ForMaskedLM', 'DebertaV2ForMultipleChoice', 'DebertaV2ForQuestionAnswering', 'DebertaV2ForSequenceClassification', 'DebertaV2ForTokenClassification', 'DebertaV2Model', 'DebertaV2PreTrainedModel']) _import_structure['models.decision_transformer'].extend(['DecisionTransformerGPT2Model', 'DecisionTransformerGPT2PreTrainedModel', 'DecisionTransformerModel', 'DecisionTransformerPreTrainedModel']) _import_structure['models.deformable_detr'].extend(['DeformableDetrForObjectDetection', 'DeformableDetrModel', 'DeformableDetrPreTrainedModel']) _import_structure['models.deit'].extend(['DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel']) _import_structure['models.deprecated.deta'].extend(['DetaForObjectDetection', 'DetaModel', 'DetaPreTrainedModel']) _import_structure['models.deprecated.efficientformer'].extend(['EfficientFormerForImageClassification', 'EfficientFormerForImageClassificationWithTeacher', 'EfficientFormerModel', 'EfficientFormerPreTrainedModel']) _import_structure['models.deprecated.ernie_m'].extend(['ErnieMForInformationExtraction', 'ErnieMForMultipleChoice', 'ErnieMForQuestionAnswering', 'ErnieMForSequenceClassification', 'ErnieMForTokenClassification', 'ErnieMModel', 'ErnieMPreTrainedModel']) _import_structure['models.deprecated.gptsan_japanese'].extend(['GPTSanJapaneseForConditionalGeneration', 'GPTSanJapaneseModel', 'GPTSanJapanesePreTrainedModel']) _import_structure['models.deprecated.graphormer'].extend(['GraphormerForGraphClassification', 'GraphormerModel', 'GraphormerPreTrainedModel']) _import_structure['models.deprecated.jukebox'].extend(['JukeboxModel', 'JukeboxPreTrainedModel', 'JukeboxPrior', 'JukeboxVQVAE']) _import_structure['models.deprecated.mctct'].extend(['MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel']) _import_structure['models.deprecated.mega'].extend(['MegaForCausalLM', 'MegaForMaskedLM', 'MegaForMultipleChoice', 'MegaForQuestionAnswering', 'MegaForSequenceClassification', 'MegaForTokenClassification', 'MegaModel', 'MegaPreTrainedModel']) _import_structure['models.deprecated.mmbt'].extend(['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']) _import_structure['models.deprecated.nat'].extend(['NatBackbone', 'NatForImageClassification', 'NatModel', 'NatPreTrainedModel']) _import_structure['models.deprecated.nezha'].extend(['NezhaForMaskedLM', 'NezhaForMultipleChoice', 'NezhaForNextSentencePrediction', 
'NezhaForPreTraining', 'NezhaForQuestionAnswering', 'NezhaForSequenceClassification', 'NezhaForTokenClassification', 'NezhaModel', 'NezhaPreTrainedModel']) _import_structure['models.deprecated.open_llama'].extend(['OpenLlamaForCausalLM', 'OpenLlamaForSequenceClassification', 'OpenLlamaModel', 'OpenLlamaPreTrainedModel']) _import_structure['models.deprecated.qdqbert'].extend(['QDQBertForMaskedLM', 'QDQBertForMultipleChoice', 'QDQBertForNextSentencePrediction', 'QDQBertForQuestionAnswering', 'QDQBertForSequenceClassification', 'QDQBertForTokenClassification', 'QDQBertLMHeadModel', 'QDQBertModel', 'QDQBertPreTrainedModel', 'load_tf_weights_in_qdqbert']) _import_structure['models.deprecated.realm'].extend(['RealmEmbedder', 'RealmForOpenQA', 'RealmKnowledgeAugEncoder', 'RealmPreTrainedModel', 'RealmReader', 'RealmRetriever', 'RealmScorer', 'load_tf_weights_in_realm']) _import_structure['models.deprecated.retribert'].extend(['RetriBertModel', 'RetriBertPreTrainedModel']) _import_structure['models.deprecated.speech_to_text_2'].extend(['Speech2Text2ForCausalLM', 'Speech2Text2PreTrainedModel']) _import_structure['models.deprecated.trajectory_transformer'].extend(['TrajectoryTransformerModel', 'TrajectoryTransformerPreTrainedModel']) _import_structure['models.deprecated.transfo_xl'].extend(['AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl']) _import_structure['models.deprecated.tvlt'].extend(['TvltForAudioVisualClassification', 'TvltForPreTraining', 'TvltModel', 'TvltPreTrainedModel']) _import_structure['models.deprecated.van'].extend(['VanForImageClassification', 'VanModel', 'VanPreTrainedModel']) _import_structure['models.deprecated.vit_hybrid'].extend(['ViTHybridForImageClassification', 'ViTHybridModel', 'ViTHybridPreTrainedModel']) _import_structure['models.deprecated.xlm_prophetnet'].extend(['XLMProphetNetDecoder', 'XLMProphetNetEncoder', 'XLMProphetNetForCausalLM', 'XLMProphetNetForConditionalGeneration', 'XLMProphetNetModel', 'XLMProphetNetPreTrainedModel']) _import_structure['models.depth_anything'].extend(['DepthAnythingForDepthEstimation', 'DepthAnythingPreTrainedModel']) _import_structure['models.detr'].extend(['DetrForObjectDetection', 'DetrForSegmentation', 'DetrModel', 'DetrPreTrainedModel']) _import_structure['models.dinat'].extend(['DinatBackbone', 'DinatForImageClassification', 'DinatModel', 'DinatPreTrainedModel']) _import_structure['models.dinov2'].extend(['Dinov2Backbone', 'Dinov2ForImageClassification', 'Dinov2Model', 'Dinov2PreTrainedModel']) _import_structure['models.distilbert'].extend(['DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel']) _import_structure['models.donut'].extend(['DonutSwinModel', 'DonutSwinPreTrainedModel']) _import_structure['models.dpr'].extend(['DPRContextEncoder', 'DPRPretrainedContextEncoder', 'DPRPreTrainedModel', 'DPRPretrainedQuestionEncoder', 'DPRPretrainedReader', 'DPRQuestionEncoder', 'DPRReader']) _import_structure['models.dpt'].extend(['DPTForDepthEstimation', 'DPTForSemanticSegmentation', 'DPTModel', 'DPTPreTrainedModel']) _import_structure['models.efficientnet'].extend(['EfficientNetForImageClassification', 'EfficientNetModel', 'EfficientNetPreTrainedModel']) _import_structure['models.electra'].extend(['ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 
'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra']) _import_structure['models.encodec'].extend(['EncodecModel', 'EncodecPreTrainedModel']) _import_structure['models.encoder_decoder'].append('EncoderDecoderModel') _import_structure['models.ernie'].extend(['ErnieForCausalLM', 'ErnieForMaskedLM', 'ErnieForMultipleChoice', 'ErnieForNextSentencePrediction', 'ErnieForPreTraining', 'ErnieForQuestionAnswering', 'ErnieForSequenceClassification', 'ErnieForTokenClassification', 'ErnieModel', 'ErniePreTrainedModel']) _import_structure['models.esm'].extend(['EsmFoldPreTrainedModel', 'EsmForMaskedLM', 'EsmForProteinFolding', 'EsmForSequenceClassification', 'EsmForTokenClassification', 'EsmModel', 'EsmPreTrainedModel']) _import_structure['models.falcon'].extend(['FalconForCausalLM', 'FalconForQuestionAnswering', 'FalconForSequenceClassification', 'FalconForTokenClassification', 'FalconModel', 'FalconPreTrainedModel']) _import_structure['models.falcon_mamba'].extend(['FalconMambaForCausalLM', 'FalconMambaModel', 'FalconMambaPreTrainedModel']) _import_structure['models.fastspeech2_conformer'].extend(['FastSpeech2ConformerHifiGan', 'FastSpeech2ConformerModel', 'FastSpeech2ConformerPreTrainedModel', 'FastSpeech2ConformerWithHifiGan']) _import_structure['models.flaubert'].extend(['FlaubertForMultipleChoice', 'FlaubertForQuestionAnswering', 'FlaubertForQuestionAnsweringSimple', 'FlaubertForSequenceClassification', 'FlaubertForTokenClassification', 'FlaubertModel', 'FlaubertPreTrainedModel', 'FlaubertWithLMHeadModel']) _import_structure['models.flava'].extend(['FlavaForPreTraining', 'FlavaImageCodebook', 'FlavaImageModel', 'FlavaModel', 'FlavaMultimodalModel', 'FlavaPreTrainedModel', 'FlavaTextModel']) _import_structure['models.fnet'].extend(['FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetModel', 'FNetPreTrainedModel']) _import_structure['models.focalnet'].extend(['FocalNetBackbone', 'FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetModel', 'FocalNetPreTrainedModel']) _import_structure['models.fsmt'].extend(['FSMTForConditionalGeneration', 'FSMTModel', 'PretrainedFSMTModel']) _import_structure['models.funnel'].extend(['FunnelBaseModel', 'FunnelForMaskedLM', 'FunnelForMultipleChoice', 'FunnelForPreTraining', 'FunnelForQuestionAnswering', 'FunnelForSequenceClassification', 'FunnelForTokenClassification', 'FunnelModel', 'FunnelPreTrainedModel', 'load_tf_weights_in_funnel']) _import_structure['models.fuyu'].extend(['FuyuForCausalLM', 'FuyuPreTrainedModel']) _import_structure['models.gemma'].extend(['GemmaForCausalLM', 'GemmaForSequenceClassification', 'GemmaForTokenClassification', 'GemmaModel', 'GemmaPreTrainedModel']) _import_structure['models.gemma2'].extend(['Gemma2ForCausalLM', 'Gemma2ForSequenceClassification', 'Gemma2ForTokenClassification', 'Gemma2Model', 'Gemma2PreTrainedModel']) _import_structure['models.git'].extend(['GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel']) _import_structure['models.glpn'].extend(['GLPNForDepthEstimation', 'GLPNModel', 'GLPNPreTrainedModel']) _import_structure['models.gpt2'].extend(['GPT2DoubleHeadsModel', 'GPT2ForQuestionAnswering', 'GPT2ForSequenceClassification', 'GPT2ForTokenClassification', 'GPT2LMHeadModel', 'GPT2Model', 
'GPT2PreTrainedModel', 'load_tf_weights_in_gpt2']) _import_structure['models.gpt_bigcode'].extend(['GPTBigCodeForCausalLM', 'GPTBigCodeForSequenceClassification', 'GPTBigCodeForTokenClassification', 'GPTBigCodeModel', 'GPTBigCodePreTrainedModel']) _import_structure['models.gpt_neo'].extend(['GPTNeoForCausalLM', 'GPTNeoForQuestionAnswering', 'GPTNeoForSequenceClassification', 'GPTNeoForTokenClassification', 'GPTNeoModel', 'GPTNeoPreTrainedModel', 'load_tf_weights_in_gpt_neo']) _import_structure['models.gpt_neox'].extend(['GPTNeoXForCausalLM', 'GPTNeoXForQuestionAnswering', 'GPTNeoXForSequenceClassification', 'GPTNeoXForTokenClassification', 'GPTNeoXModel', 'GPTNeoXPreTrainedModel']) _import_structure['models.gpt_neox_japanese'].extend(['GPTNeoXJapaneseForCausalLM', 'GPTNeoXJapaneseModel', 'GPTNeoXJapanesePreTrainedModel']) _import_structure['models.gptj'].extend(['GPTJForCausalLM', 'GPTJForQuestionAnswering', 'GPTJForSequenceClassification', 'GPTJModel', 'GPTJPreTrainedModel']) _import_structure['models.granite'].extend(['GraniteForCausalLM', 'GraniteModel', 'GranitePreTrainedModel']) _import_structure['models.grounding_dino'].extend(['GroundingDinoForObjectDetection', 'GroundingDinoModel', 'GroundingDinoPreTrainedModel']) _import_structure['models.groupvit'].extend(['GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel']) _import_structure['models.hiera'].extend(['HieraBackbone', 'HieraForImageClassification', 'HieraForPreTraining', 'HieraModel', 'HieraPreTrainedModel']) _import_structure['models.hubert'].extend(['HubertForCTC', 'HubertForSequenceClassification', 'HubertModel', 'HubertPreTrainedModel']) _import_structure['models.ibert'].extend(['IBertForMaskedLM', 'IBertForMultipleChoice', 'IBertForQuestionAnswering', 'IBertForSequenceClassification', 'IBertForTokenClassification', 'IBertModel', 'IBertPreTrainedModel']) _import_structure['models.idefics'].extend(['IdeficsForVisionText2Text', 'IdeficsModel', 'IdeficsPreTrainedModel', 'IdeficsProcessor']) _import_structure['models.idefics2'].extend(['Idefics2ForConditionalGeneration', 'Idefics2Model', 'Idefics2PreTrainedModel', 'Idefics2Processor']) _import_structure['models.imagegpt'].extend(['ImageGPTForCausalImageModeling', 'ImageGPTForImageClassification', 'ImageGPTModel', 'ImageGPTPreTrainedModel', 'load_tf_weights_in_imagegpt']) _import_structure['models.informer'].extend(['InformerForPrediction', 'InformerModel', 'InformerPreTrainedModel']) _import_structure['models.instructblip'].extend(['InstructBlipForConditionalGeneration', 'InstructBlipPreTrainedModel', 'InstructBlipQFormerModel', 'InstructBlipVisionModel']) _import_structure['models.instructblipvideo'].extend(['InstructBlipVideoForConditionalGeneration', 'InstructBlipVideoPreTrainedModel', 'InstructBlipVideoQFormerModel', 'InstructBlipVideoVisionModel']) _import_structure['models.jamba'].extend(['JambaForCausalLM', 'JambaForSequenceClassification', 'JambaModel', 'JambaPreTrainedModel']) _import_structure['models.jetmoe'].extend(['JetMoeForCausalLM', 'JetMoeForSequenceClassification', 'JetMoeModel', 'JetMoePreTrainedModel']) _import_structure['models.kosmos2'].extend(['Kosmos2ForConditionalGeneration', 'Kosmos2Model', 'Kosmos2PreTrainedModel']) _import_structure['models.layoutlm'].extend(['LayoutLMForMaskedLM', 'LayoutLMForQuestionAnswering', 'LayoutLMForSequenceClassification', 'LayoutLMForTokenClassification', 'LayoutLMModel', 'LayoutLMPreTrainedModel']) _import_structure['models.layoutlmv2'].extend(['LayoutLMv2ForQuestionAnswering', 
'LayoutLMv2ForSequenceClassification', 'LayoutLMv2ForTokenClassification', 'LayoutLMv2Model', 'LayoutLMv2PreTrainedModel']) _import_structure['models.layoutlmv3'].extend(['LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel']) _import_structure['models.led'].extend(['LEDForConditionalGeneration', 'LEDForQuestionAnswering', 'LEDForSequenceClassification', 'LEDModel', 'LEDPreTrainedModel']) _import_structure['models.levit'].extend(['LevitForImageClassification', 'LevitForImageClassificationWithTeacher', 'LevitModel', 'LevitPreTrainedModel']) _import_structure['models.lilt'].extend(['LiltForQuestionAnswering', 'LiltForSequenceClassification', 'LiltForTokenClassification', 'LiltModel', 'LiltPreTrainedModel']) _import_structure['models.llama'].extend(['LlamaForCausalLM', 'LlamaForQuestionAnswering', 'LlamaForSequenceClassification', 'LlamaForTokenClassification', 'LlamaModel', 'LlamaPreTrainedModel']) _import_structure['models.llava'].extend(['LlavaForConditionalGeneration', 'LlavaPreTrainedModel']) _import_structure['models.llava_next'].extend(['LlavaNextForConditionalGeneration', 'LlavaNextPreTrainedModel']) _import_structure['models.llava_next_video'].extend(['LlavaNextVideoForConditionalGeneration', 'LlavaNextVideoPreTrainedModel']) _import_structure['models.llava_onevision'].extend(['LlavaOnevisionForConditionalGeneration', 'LlavaOnevisionPreTrainedModel']) _import_structure['models.longformer'].extend(['LongformerForMaskedLM', 'LongformerForMultipleChoice', 'LongformerForQuestionAnswering', 'LongformerForSequenceClassification', 'LongformerForTokenClassification', 'LongformerModel', 'LongformerPreTrainedModel']) _import_structure['models.longt5'].extend(['LongT5EncoderModel', 'LongT5ForConditionalGeneration', 'LongT5Model', 'LongT5PreTrainedModel']) _import_structure['models.luke'].extend(['LukeForEntityClassification', 'LukeForEntityPairClassification', 'LukeForEntitySpanClassification', 'LukeForMaskedLM', 'LukeForMultipleChoice', 'LukeForQuestionAnswering', 'LukeForSequenceClassification', 'LukeForTokenClassification', 'LukeModel', 'LukePreTrainedModel']) _import_structure['models.lxmert'].extend(['LxmertEncoder', 'LxmertForPreTraining', 'LxmertForQuestionAnswering', 'LxmertModel', 'LxmertPreTrainedModel', 'LxmertVisualFeatureEncoder']) _import_structure['models.m2m_100'].extend(['M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel']) _import_structure['models.mamba'].extend(['MambaForCausalLM', 'MambaModel', 'MambaPreTrainedModel']) _import_structure['models.mamba2'].extend(['Mamba2ForCausalLM', 'Mamba2Model', 'Mamba2PreTrainedModel']) _import_structure['models.marian'].extend(['MarianForCausalLM', 'MarianModel', 'MarianMTModel', 'MarianPreTrainedModel']) _import_structure['models.markuplm'].extend(['MarkupLMForQuestionAnswering', 'MarkupLMForSequenceClassification', 'MarkupLMForTokenClassification', 'MarkupLMModel', 'MarkupLMPreTrainedModel']) _import_structure['models.mask2former'].extend(['Mask2FormerForUniversalSegmentation', 'Mask2FormerModel', 'Mask2FormerPreTrainedModel']) _import_structure['models.maskformer'].extend(['MaskFormerForInstanceSegmentation', 'MaskFormerModel', 'MaskFormerPreTrainedModel', 'MaskFormerSwinBackbone']) _import_structure['models.mbart'].extend(['MBartForCausalLM', 'MBartForConditionalGeneration', 'MBartForQuestionAnswering', 'MBartForSequenceClassification', 'MBartModel', 'MBartPreTrainedModel']) 
_import_structure['models.megatron_bert'].extend(['MegatronBertForCausalLM', 'MegatronBertForMaskedLM', 'MegatronBertForMultipleChoice', 'MegatronBertForNextSentencePrediction', 'MegatronBertForPreTraining', 'MegatronBertForQuestionAnswering', 'MegatronBertForSequenceClassification', 'MegatronBertForTokenClassification', 'MegatronBertModel', 'MegatronBertPreTrainedModel']) _import_structure['models.mgp_str'].extend(['MgpstrForSceneTextRecognition', 'MgpstrModel', 'MgpstrPreTrainedModel']) _import_structure['models.mistral'].extend(['MistralForCausalLM', 'MistralForSequenceClassification', 'MistralForTokenClassification', 'MistralModel', 'MistralPreTrainedModel']) _import_structure['models.mixtral'].extend(['MixtralForCausalLM', 'MixtralForSequenceClassification', 'MixtralForTokenClassification', 'MixtralModel', 'MixtralPreTrainedModel']) _import_structure['models.mobilebert'].extend(['MobileBertForMaskedLM', 'MobileBertForMultipleChoice', 'MobileBertForNextSentencePrediction', 'MobileBertForPreTraining', 'MobileBertForQuestionAnswering', 'MobileBertForSequenceClassification', 'MobileBertForTokenClassification', 'MobileBertModel', 'MobileBertPreTrainedModel', 'load_tf_weights_in_mobilebert']) _import_structure['models.mobilenet_v1'].extend(['MobileNetV1ForImageClassification', 'MobileNetV1Model', 'MobileNetV1PreTrainedModel', 'load_tf_weights_in_mobilenet_v1']) _import_structure['models.mobilenet_v2'].extend(['MobileNetV2ForImageClassification', 'MobileNetV2ForSemanticSegmentation', 'MobileNetV2Model', 'MobileNetV2PreTrainedModel', 'load_tf_weights_in_mobilenet_v2']) _import_structure['models.mobilevit'].extend(['MobileViTForImageClassification', 'MobileViTForSemanticSegmentation', 'MobileViTModel', 'MobileViTPreTrainedModel']) _import_structure['models.mobilevitv2'].extend(['MobileViTV2ForImageClassification', 'MobileViTV2ForSemanticSegmentation', 'MobileViTV2Model', 'MobileViTV2PreTrainedModel']) _import_structure['models.mpnet'].extend(['MPNetForMaskedLM', 'MPNetForMultipleChoice', 'MPNetForQuestionAnswering', 'MPNetForSequenceClassification', 'MPNetForTokenClassification', 'MPNetModel', 'MPNetPreTrainedModel']) _import_structure['models.mpt'].extend(['MptForCausalLM', 'MptForQuestionAnswering', 'MptForSequenceClassification', 'MptForTokenClassification', 'MptModel', 'MptPreTrainedModel']) _import_structure['models.mra'].extend(['MraForMaskedLM', 'MraForMultipleChoice', 'MraForQuestionAnswering', 'MraForSequenceClassification', 'MraForTokenClassification', 'MraModel', 'MraPreTrainedModel']) _import_structure['models.mt5'].extend(['MT5EncoderModel', 'MT5ForConditionalGeneration', 'MT5ForQuestionAnswering', 'MT5ForSequenceClassification', 'MT5ForTokenClassification', 'MT5Model', 'MT5PreTrainedModel']) _import_structure['models.musicgen'].extend(['MusicgenForCausalLM', 'MusicgenForConditionalGeneration', 'MusicgenModel', 'MusicgenPreTrainedModel', 'MusicgenProcessor']) _import_structure['models.musicgen_melody'].extend(['MusicgenMelodyForCausalLM', 'MusicgenMelodyForConditionalGeneration', 'MusicgenMelodyModel', 'MusicgenMelodyPreTrainedModel']) _import_structure['models.mvp'].extend(['MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel']) _import_structure['models.nemotron'].extend(['NemotronForCausalLM', 'NemotronForQuestionAnswering', 'NemotronForSequenceClassification', 'NemotronForTokenClassification', 'NemotronModel', 'NemotronPreTrainedModel']) 
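# Note: `_import_structure` is the package's lazy-import registry: each key is a dotted
# submodule path and each value is the list of public names that submodule contributes to the
# top-level namespace. A minimal, self-contained sketch of the same idea (the names and helper
# below are illustrative, not part of this file):
#
#     _registry = {
#         "models.bert": ["BertModel", "BertPreTrainedModel"],
#         "models.gpt2": ["GPT2Model"],
#     }
#
#     def owning_module(name: str, registry: dict) -> str:
#         # Find which submodule declares a given public name.
#         return next(mod for mod, names in registry.items() if name in names)
#
#     assert owning_module("GPT2Model", _registry) == "models.gpt2"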
_import_structure['models.nllb_moe'].extend(['NllbMoeForConditionalGeneration', 'NllbMoeModel', 'NllbMoePreTrainedModel', 'NllbMoeSparseMLP', 'NllbMoeTop2Router']) _import_structure['models.nystromformer'].extend(['NystromformerForMaskedLM', 'NystromformerForMultipleChoice', 'NystromformerForQuestionAnswering', 'NystromformerForSequenceClassification', 'NystromformerForTokenClassification', 'NystromformerModel', 'NystromformerPreTrainedModel']) _import_structure['models.olmo'].extend(['OlmoForCausalLM', 'OlmoModel', 'OlmoPreTrainedModel']) _import_structure['models.olmoe'].extend(['OlmoeForCausalLM', 'OlmoeModel', 'OlmoePreTrainedModel']) _import_structure['models.oneformer'].extend(['OneFormerForUniversalSegmentation', 'OneFormerModel', 'OneFormerPreTrainedModel']) _import_structure['models.openai'].extend(['OpenAIGPTDoubleHeadsModel', 'OpenAIGPTForSequenceClassification', 'OpenAIGPTLMHeadModel', 'OpenAIGPTModel', 'OpenAIGPTPreTrainedModel', 'load_tf_weights_in_openai_gpt']) _import_structure['models.opt'].extend(['OPTForCausalLM', 'OPTForQuestionAnswering', 'OPTForSequenceClassification', 'OPTModel', 'OPTPreTrainedModel']) _import_structure['models.owlv2'].extend(['Owlv2ForObjectDetection', 'Owlv2Model', 'Owlv2PreTrainedModel', 'Owlv2TextModel', 'Owlv2VisionModel']) _import_structure['models.owlvit'].extend(['OwlViTForObjectDetection', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel']) _import_structure['models.paligemma'].extend(['PaliGemmaForConditionalGeneration', 'PaliGemmaPreTrainedModel', 'PaliGemmaProcessor']) _import_structure['models.patchtsmixer'].extend(['PatchTSMixerForPrediction', 'PatchTSMixerForPretraining', 'PatchTSMixerForRegression', 'PatchTSMixerForTimeSeriesClassification', 'PatchTSMixerModel', 'PatchTSMixerPreTrainedModel']) _import_structure['models.patchtst'].extend(['PatchTSTForClassification', 'PatchTSTForPrediction', 'PatchTSTForPretraining', 'PatchTSTForRegression', 'PatchTSTModel', 'PatchTSTPreTrainedModel']) _import_structure['models.pegasus'].extend(['PegasusForCausalLM', 'PegasusForConditionalGeneration', 'PegasusModel', 'PegasusPreTrainedModel']) _import_structure['models.pegasus_x'].extend(['PegasusXForConditionalGeneration', 'PegasusXModel', 'PegasusXPreTrainedModel']) _import_structure['models.perceiver'].extend(['PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverModel', 'PerceiverPreTrainedModel']) _import_structure['models.persimmon'].extend(['PersimmonForCausalLM', 'PersimmonForSequenceClassification', 'PersimmonForTokenClassification', 'PersimmonModel', 'PersimmonPreTrainedModel']) _import_structure['models.phi'].extend(['PhiForCausalLM', 'PhiForSequenceClassification', 'PhiForTokenClassification', 'PhiModel', 'PhiPreTrainedModel']) _import_structure['models.phi3'].extend(['Phi3ForCausalLM', 'Phi3ForSequenceClassification', 'Phi3ForTokenClassification', 'Phi3Model', 'Phi3PreTrainedModel']) _import_structure['models.pix2struct'].extend(['Pix2StructForConditionalGeneration', 'Pix2StructPreTrainedModel', 'Pix2StructTextModel', 'Pix2StructVisionModel']) _import_structure['models.pixtral'].extend(['PixtralModel', 'PixtralPreTrainedModel']) _import_structure['models.plbart'].extend(['PLBartForCausalLM', 'PLBartForConditionalGeneration', 'PLBartForSequenceClassification', 'PLBartModel', 'PLBartPreTrainedModel']) 
_import_structure['models.poolformer'].extend(['PoolFormerForImageClassification', 'PoolFormerModel', 'PoolFormerPreTrainedModel']) _import_structure['models.pop2piano'].extend(['Pop2PianoForConditionalGeneration', 'Pop2PianoPreTrainedModel']) _import_structure['models.prophetnet'].extend(['ProphetNetDecoder', 'ProphetNetEncoder', 'ProphetNetForCausalLM', 'ProphetNetForConditionalGeneration', 'ProphetNetModel', 'ProphetNetPreTrainedModel']) _import_structure['models.pvt'].extend(['PvtForImageClassification', 'PvtModel', 'PvtPreTrainedModel']) _import_structure['models.pvt_v2'].extend(['PvtV2Backbone', 'PvtV2ForImageClassification', 'PvtV2Model', 'PvtV2PreTrainedModel']) _import_structure['models.qwen2'].extend(['Qwen2ForCausalLM', 'Qwen2ForSequenceClassification', 'Qwen2ForTokenClassification', 'Qwen2Model', 'Qwen2PreTrainedModel']) _import_structure['models.qwen2_audio'].extend(['Qwen2AudioEncoder', 'Qwen2AudioForConditionalGeneration', 'Qwen2AudioPreTrainedModel']) _import_structure['models.qwen2_moe'].extend(['Qwen2MoeForCausalLM', 'Qwen2MoeForSequenceClassification', 'Qwen2MoeForTokenClassification', 'Qwen2MoeModel', 'Qwen2MoePreTrainedModel']) _import_structure['models.qwen2_vl'].extend(['Qwen2VLForConditionalGeneration', 'Qwen2VLModel', 'Qwen2VLPreTrainedModel']) _import_structure['models.rag'].extend(['RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration']) _import_structure['models.recurrent_gemma'].extend(['RecurrentGemmaForCausalLM', 'RecurrentGemmaModel', 'RecurrentGemmaPreTrainedModel']) _import_structure['models.reformer'].extend(['ReformerForMaskedLM', 'ReformerForQuestionAnswering', 'ReformerForSequenceClassification', 'ReformerModel', 'ReformerModelWithLMHead', 'ReformerPreTrainedModel']) _import_structure['models.regnet'].extend(['RegNetForImageClassification', 'RegNetModel', 'RegNetPreTrainedModel']) _import_structure['models.rembert'].extend(['RemBertForCausalLM', 'RemBertForMaskedLM', 'RemBertForMultipleChoice', 'RemBertForQuestionAnswering', 'RemBertForSequenceClassification', 'RemBertForTokenClassification', 'RemBertModel', 'RemBertPreTrainedModel', 'load_tf_weights_in_rembert']) _import_structure['models.resnet'].extend(['ResNetBackbone', 'ResNetForImageClassification', 'ResNetModel', 'ResNetPreTrainedModel']) _import_structure['models.roberta'].extend(['RobertaForCausalLM', 'RobertaForMaskedLM', 'RobertaForMultipleChoice', 'RobertaForQuestionAnswering', 'RobertaForSequenceClassification', 'RobertaForTokenClassification', 'RobertaModel', 'RobertaPreTrainedModel']) _import_structure['models.roberta_prelayernorm'].extend(['RobertaPreLayerNormForCausalLM', 'RobertaPreLayerNormForMaskedLM', 'RobertaPreLayerNormForMultipleChoice', 'RobertaPreLayerNormForQuestionAnswering', 'RobertaPreLayerNormForSequenceClassification', 'RobertaPreLayerNormForTokenClassification', 'RobertaPreLayerNormModel', 'RobertaPreLayerNormPreTrainedModel']) _import_structure['models.roc_bert'].extend(['RoCBertForCausalLM', 'RoCBertForMaskedLM', 'RoCBertForMultipleChoice', 'RoCBertForPreTraining', 'RoCBertForQuestionAnswering', 'RoCBertForSequenceClassification', 'RoCBertForTokenClassification', 'RoCBertModel', 'RoCBertPreTrainedModel', 'load_tf_weights_in_roc_bert']) _import_structure['models.roformer'].extend(['RoFormerForCausalLM', 'RoFormerForMaskedLM', 'RoFormerForMultipleChoice', 'RoFormerForQuestionAnswering', 'RoFormerForSequenceClassification', 'RoFormerForTokenClassification', 'RoFormerModel', 'RoFormerPreTrainedModel', 'load_tf_weights_in_roformer']) 
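# Note: several torch entries above also export `load_tf_weights_in_*` helpers, which copy the
# weights of an original TensorFlow checkpoint into a freshly built PyTorch model. A hedged
# usage sketch (the three-argument call mirrors the BERT-style converters and is an assumption
# here, as is the checkpoint path):
#
#     from transformers import RoFormerConfig, RoFormerModel, load_tf_weights_in_roformer
#
#     config = RoFormerConfig()
#     model = RoFormerModel(config)
#     # Assumed signature: (model, config, tf_checkpoint_path)
#     load_tf_weights_in_roformer(model, config, "/path/to/roformer_tf_checkpoint.ckpt")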
_import_structure['models.rt_detr'].extend(['RTDetrForObjectDetection', 'RTDetrModel', 'RTDetrPreTrainedModel', 'RTDetrResNetBackbone', 'RTDetrResNetPreTrainedModel']) _import_structure['models.rwkv'].extend(['RwkvForCausalLM', 'RwkvModel', 'RwkvPreTrainedModel']) _import_structure['models.sam'].extend(['SamModel', 'SamPreTrainedModel']) _import_structure['models.seamless_m4t'].extend(['SeamlessM4TCodeHifiGan', 'SeamlessM4TForSpeechToSpeech', 'SeamlessM4TForSpeechToText', 'SeamlessM4TForTextToSpeech', 'SeamlessM4TForTextToText', 'SeamlessM4THifiGan', 'SeamlessM4TModel', 'SeamlessM4TPreTrainedModel', 'SeamlessM4TTextToUnitForConditionalGeneration', 'SeamlessM4TTextToUnitModel']) _import_structure['models.seamless_m4t_v2'].extend(['SeamlessM4Tv2ForSpeechToSpeech', 'SeamlessM4Tv2ForSpeechToText', 'SeamlessM4Tv2ForTextToSpeech', 'SeamlessM4Tv2ForTextToText', 'SeamlessM4Tv2Model', 'SeamlessM4Tv2PreTrainedModel']) _import_structure['models.segformer'].extend(['SegformerDecodeHead', 'SegformerForImageClassification', 'SegformerForSemanticSegmentation', 'SegformerModel', 'SegformerPreTrainedModel']) _import_structure['models.seggpt'].extend(['SegGptForImageSegmentation', 'SegGptModel', 'SegGptPreTrainedModel']) _import_structure['models.sew'].extend(['SEWForCTC', 'SEWForSequenceClassification', 'SEWModel', 'SEWPreTrainedModel']) _import_structure['models.sew_d'].extend(['SEWDForCTC', 'SEWDForSequenceClassification', 'SEWDModel', 'SEWDPreTrainedModel']) _import_structure['models.siglip'].extend(['SiglipForImageClassification', 'SiglipModel', 'SiglipPreTrainedModel', 'SiglipTextModel', 'SiglipVisionModel']) _import_structure['models.speech_encoder_decoder'].extend(['SpeechEncoderDecoderModel']) _import_structure['models.speech_to_text'].extend(['Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel']) _import_structure['models.speecht5'].extend(['SpeechT5ForSpeechToSpeech', 'SpeechT5ForSpeechToText', 'SpeechT5ForTextToSpeech', 'SpeechT5HifiGan', 'SpeechT5Model', 'SpeechT5PreTrainedModel']) _import_structure['models.splinter'].extend(['SplinterForPreTraining', 'SplinterForQuestionAnswering', 'SplinterModel', 'SplinterPreTrainedModel']) _import_structure['models.squeezebert'].extend(['SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertPreTrainedModel']) _import_structure['models.stablelm'].extend(['StableLmForCausalLM', 'StableLmForSequenceClassification', 'StableLmForTokenClassification', 'StableLmModel', 'StableLmPreTrainedModel']) _import_structure['models.starcoder2'].extend(['Starcoder2ForCausalLM', 'Starcoder2ForSequenceClassification', 'Starcoder2ForTokenClassification', 'Starcoder2Model', 'Starcoder2PreTrainedModel']) _import_structure['models.superpoint'].extend(['SuperPointForKeypointDetection', 'SuperPointPreTrainedModel']) _import_structure['models.swiftformer'].extend(['SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel']) _import_structure['models.swin'].extend(['SwinBackbone', 'SwinForImageClassification', 'SwinForMaskedImageModeling', 'SwinModel', 'SwinPreTrainedModel']) _import_structure['models.swin2sr'].extend(['Swin2SRForImageSuperResolution', 'Swin2SRModel', 'Swin2SRPreTrainedModel']) _import_structure['models.swinv2'].extend(['Swinv2Backbone', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel']) 
_import_structure['models.switch_transformers'].extend(['SwitchTransformersEncoderModel', 'SwitchTransformersForConditionalGeneration', 'SwitchTransformersModel', 'SwitchTransformersPreTrainedModel', 'SwitchTransformersSparseMLP', 'SwitchTransformersTop1Router']) _import_structure['models.t5'].extend(['T5EncoderModel', 'T5ForConditionalGeneration', 'T5ForQuestionAnswering', 'T5ForSequenceClassification', 'T5ForTokenClassification', 'T5Model', 'T5PreTrainedModel', 'load_tf_weights_in_t5']) _import_structure['models.table_transformer'].extend(['TableTransformerForObjectDetection', 'TableTransformerModel', 'TableTransformerPreTrainedModel']) _import_structure['models.tapas'].extend(['TapasForMaskedLM', 'TapasForQuestionAnswering', 'TapasForSequenceClassification', 'TapasModel', 'TapasPreTrainedModel', 'load_tf_weights_in_tapas']) _import_structure['models.time_series_transformer'].extend(['TimeSeriesTransformerForPrediction', 'TimeSeriesTransformerModel', 'TimeSeriesTransformerPreTrainedModel']) _import_structure['models.timesformer'].extend(['TimesformerForVideoClassification', 'TimesformerModel', 'TimesformerPreTrainedModel']) _import_structure['models.timm_backbone'].extend(['TimmBackbone']) _import_structure['models.trocr'].extend(['TrOCRForCausalLM', 'TrOCRPreTrainedModel']) _import_structure['models.tvp'].extend(['TvpForVideoGrounding', 'TvpModel', 'TvpPreTrainedModel']) _import_structure['models.udop'].extend(['UdopEncoderModel', 'UdopForConditionalGeneration', 'UdopModel', 'UdopPreTrainedModel']) _import_structure['models.umt5'].extend(['UMT5EncoderModel', 'UMT5ForConditionalGeneration', 'UMT5ForQuestionAnswering', 'UMT5ForSequenceClassification', 'UMT5ForTokenClassification', 'UMT5Model', 'UMT5PreTrainedModel']) _import_structure['models.unispeech'].extend(['UniSpeechForCTC', 'UniSpeechForPreTraining', 'UniSpeechForSequenceClassification', 'UniSpeechModel', 'UniSpeechPreTrainedModel']) _import_structure['models.unispeech_sat'].extend(['UniSpeechSatForAudioFrameClassification', 'UniSpeechSatForCTC', 'UniSpeechSatForPreTraining', 'UniSpeechSatForSequenceClassification', 'UniSpeechSatForXVector', 'UniSpeechSatModel', 'UniSpeechSatPreTrainedModel']) _import_structure['models.univnet'].extend(['UnivNetModel']) _import_structure['models.upernet'].extend(['UperNetForSemanticSegmentation', 'UperNetPreTrainedModel']) _import_structure['models.video_llava'].extend(['VideoLlavaForConditionalGeneration', 'VideoLlavaPreTrainedModel', 'VideoLlavaProcessor']) _import_structure['models.videomae'].extend(['VideoMAEForPreTraining', 'VideoMAEForVideoClassification', 'VideoMAEModel', 'VideoMAEPreTrainedModel']) _import_structure['models.vilt'].extend(['ViltForImageAndTextRetrieval', 'ViltForImagesAndTextClassification', 'ViltForMaskedLM', 'ViltForQuestionAnswering', 'ViltForTokenClassification', 'ViltModel', 'ViltPreTrainedModel']) _import_structure['models.vipllava'].extend(['VipLlavaForConditionalGeneration', 'VipLlavaPreTrainedModel']) _import_structure['models.vision_encoder_decoder'].extend(['VisionEncoderDecoderModel']) _import_structure['models.vision_text_dual_encoder'].extend(['VisionTextDualEncoderModel']) _import_structure['models.visual_bert'].extend(['VisualBertForMultipleChoice', 'VisualBertForPreTraining', 'VisualBertForQuestionAnswering', 'VisualBertForRegionToPhraseAlignment', 'VisualBertForVisualReasoning', 'VisualBertModel', 'VisualBertPreTrainedModel']) _import_structure['models.vit'].extend(['ViTForImageClassification', 'ViTForMaskedImageModeling', 'ViTModel', 'ViTPreTrainedModel']) 
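# Note: the `try/except OptionalDependencyNotAvailable` blocks further down (TensorFlow, the
# essentia/librosa/pretty_midi extras, torchaudio, Flax) all follow the same guard pattern:
# when a backend is missing, the registry is pointed at the matching `utils.dummy_*_objects`
# module instead, whose placeholders raise an informative error as soon as they are used.
# Simplified sketch of such a placeholder (the real ones are auto-generated and rely on the
# library's `requires_backends` helper; this is an illustration, not the exact code):
#
#     class TFBertModel:
#         def __init__(self, *args, **kwargs):
#             raise ImportError(
#                 "TFBertModel requires the TensorFlow backend, which is not installed."
#             )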
_import_structure['models.vit_mae'].extend(['ViTMAEForPreTraining', 'ViTMAEModel', 'ViTMAEPreTrainedModel']) _import_structure['models.vit_msn'].extend(['ViTMSNForImageClassification', 'ViTMSNModel', 'ViTMSNPreTrainedModel']) _import_structure['models.vitdet'].extend(['VitDetBackbone', 'VitDetModel', 'VitDetPreTrainedModel']) _import_structure['models.vitmatte'].extend(['VitMatteForImageMatting', 'VitMattePreTrainedModel']) _import_structure['models.vits'].extend(['VitsModel', 'VitsPreTrainedModel']) _import_structure['models.vivit'].extend(['VivitForVideoClassification', 'VivitModel', 'VivitPreTrainedModel']) _import_structure['models.wav2vec2'].extend(['Wav2Vec2ForAudioFrameClassification', 'Wav2Vec2ForCTC', 'Wav2Vec2ForMaskedLM', 'Wav2Vec2ForPreTraining', 'Wav2Vec2ForSequenceClassification', 'Wav2Vec2ForXVector', 'Wav2Vec2Model', 'Wav2Vec2PreTrainedModel']) _import_structure['models.wav2vec2_bert'].extend(['Wav2Vec2BertForAudioFrameClassification', 'Wav2Vec2BertForCTC', 'Wav2Vec2BertForSequenceClassification', 'Wav2Vec2BertForXVector', 'Wav2Vec2BertModel', 'Wav2Vec2BertPreTrainedModel']) _import_structure['models.wav2vec2_conformer'].extend(['Wav2Vec2ConformerForAudioFrameClassification', 'Wav2Vec2ConformerForCTC', 'Wav2Vec2ConformerForPreTraining', 'Wav2Vec2ConformerForSequenceClassification', 'Wav2Vec2ConformerForXVector', 'Wav2Vec2ConformerModel', 'Wav2Vec2ConformerPreTrainedModel']) _import_structure['models.wavlm'].extend(['WavLMForAudioFrameClassification', 'WavLMForCTC', 'WavLMForSequenceClassification', 'WavLMForXVector', 'WavLMModel', 'WavLMPreTrainedModel']) _import_structure['models.whisper'].extend(['WhisperForAudioClassification', 'WhisperForCausalLM', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel']) _import_structure['models.x_clip'].extend(['XCLIPModel', 'XCLIPPreTrainedModel', 'XCLIPTextModel', 'XCLIPVisionModel']) _import_structure['models.xglm'].extend(['XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel']) _import_structure['models.xlm'].extend(['XLMForMultipleChoice', 'XLMForQuestionAnswering', 'XLMForQuestionAnsweringSimple', 'XLMForSequenceClassification', 'XLMForTokenClassification', 'XLMModel', 'XLMPreTrainedModel', 'XLMWithLMHeadModel']) _import_structure['models.xlm_roberta'].extend(['XLMRobertaForCausalLM', 'XLMRobertaForMaskedLM', 'XLMRobertaForMultipleChoice', 'XLMRobertaForQuestionAnswering', 'XLMRobertaForSequenceClassification', 'XLMRobertaForTokenClassification', 'XLMRobertaModel', 'XLMRobertaPreTrainedModel']) _import_structure['models.xlm_roberta_xl'].extend(['XLMRobertaXLForCausalLM', 'XLMRobertaXLForMaskedLM', 'XLMRobertaXLForMultipleChoice', 'XLMRobertaXLForQuestionAnswering', 'XLMRobertaXLForSequenceClassification', 'XLMRobertaXLForTokenClassification', 'XLMRobertaXLModel', 'XLMRobertaXLPreTrainedModel']) _import_structure['models.xlnet'].extend(['XLNetForMultipleChoice', 'XLNetForQuestionAnswering', 'XLNetForQuestionAnsweringSimple', 'XLNetForSequenceClassification', 'XLNetForTokenClassification', 'XLNetLMHeadModel', 'XLNetModel', 'XLNetPreTrainedModel', 'load_tf_weights_in_xlnet']) _import_structure['models.xmod'].extend(['XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel']) _import_structure['models.yolos'].extend(['YolosForObjectDetection', 'YolosModel', 'YolosPreTrainedModel']) _import_structure['models.yoso'].extend(['YosoForMaskedLM', 'YosoForMultipleChoice', 
'YosoForQuestionAnswering', 'YosoForSequenceClassification', 'YosoForTokenClassification', 'YosoModel', 'YosoPreTrainedModel']) _import_structure['models.zoedepth'].extend(['ZoeDepthForDepthEstimation', 'ZoeDepthPreTrainedModel']) _import_structure['optimization'] = ['Adafactor', 'AdamW', 'get_constant_schedule', 'get_constant_schedule_with_warmup', 'get_cosine_schedule_with_warmup', 'get_cosine_with_hard_restarts_schedule_with_warmup', 'get_inverse_sqrt_schedule', 'get_linear_schedule_with_warmup', 'get_polynomial_decay_schedule_with_warmup', 'get_scheduler', 'get_wsd_schedule'] _import_structure['pytorch_utils'] = ['Conv1D', 'apply_chunking_to_forward', 'prune_layer'] _import_structure['sagemaker'] = [] _import_structure['time_series_utils'] = [] _import_structure['trainer'] = ['Trainer'] _import_structure['trainer_pt_utils'] = ['torch_distributed_zero_first'] _import_structure['trainer_seq2seq'] = ['Seq2SeqTrainer'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_tf_objects _import_structure['utils.dummy_tf_objects'] = [name for name in dir(dummy_tf_objects) if not name.startswith('_')] else: _import_structure['activations_tf'] = [] _import_structure['benchmark.benchmark_args_tf'] = ['TensorFlowBenchmarkArguments'] _import_structure['benchmark.benchmark_tf'] = ['TensorFlowBenchmark'] _import_structure['generation'].extend(['TFForcedBOSTokenLogitsProcessor', 'TFForcedEOSTokenLogitsProcessor', 'TFForceTokensLogitsProcessor', 'TFGenerationMixin', 'TFLogitsProcessor', 'TFLogitsProcessorList', 'TFLogitsWarper', 'TFMinLengthLogitsProcessor', 'TFNoBadWordsLogitsProcessor', 'TFNoRepeatNGramLogitsProcessor', 'TFRepetitionPenaltyLogitsProcessor', 'TFSuppressTokensAtBeginLogitsProcessor', 'TFSuppressTokensLogitsProcessor', 'TFTemperatureLogitsWarper', 'TFTopKLogitsWarper', 'TFTopPLogitsWarper']) _import_structure['keras_callbacks'] = ['KerasMetricCallback', 'PushToHubCallback'] _import_structure['modeling_tf_outputs'] = [] _import_structure['modeling_tf_utils'] = ['TFPreTrainedModel', 'TFSequenceSummary', 'TFSharedEmbeddings', 'shape_list'] _import_structure['models.albert'].extend(['TFAlbertForMaskedLM', 'TFAlbertForMultipleChoice', 'TFAlbertForPreTraining', 'TFAlbertForQuestionAnswering', 'TFAlbertForSequenceClassification', 'TFAlbertForTokenClassification', 'TFAlbertMainLayer', 'TFAlbertModel', 'TFAlbertPreTrainedModel']) _import_structure['models.auto'].extend(['TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING', 'TF_MODEL_FOR_CAUSAL_LM_MAPPING', 'TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING', 'TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING', 'TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING', 'TF_MODEL_FOR_MASKED_LM_MAPPING', 'TF_MODEL_FOR_MASK_GENERATION_MAPPING', 'TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING', 'TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING', 'TF_MODEL_FOR_PRETRAINING_MAPPING', 'TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING', 'TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING', 'TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING', 'TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING', 'TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING', 'TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING', 'TF_MODEL_FOR_TEXT_ENCODING_MAPPING', 'TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING', 'TF_MODEL_FOR_VISION_2_SEQ_MAPPING', 'TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING', 'TF_MODEL_MAPPING', 'TF_MODEL_WITH_LM_HEAD_MAPPING', 'TFAutoModel', 'TFAutoModelForAudioClassification', 'TFAutoModelForCausalLM', 'TFAutoModelForDocumentQuestionAnswering', 
'TFAutoModelForImageClassification', 'TFAutoModelForMaskedImageModeling', 'TFAutoModelForMaskedLM', 'TFAutoModelForMaskGeneration', 'TFAutoModelForMultipleChoice', 'TFAutoModelForNextSentencePrediction', 'TFAutoModelForPreTraining', 'TFAutoModelForQuestionAnswering', 'TFAutoModelForSemanticSegmentation', 'TFAutoModelForSeq2SeqLM', 'TFAutoModelForSequenceClassification', 'TFAutoModelForSpeechSeq2Seq', 'TFAutoModelForTableQuestionAnswering', 'TFAutoModelForTextEncoding', 'TFAutoModelForTokenClassification', 'TFAutoModelForVision2Seq', 'TFAutoModelForZeroShotImageClassification', 'TFAutoModelWithLMHead']) _import_structure['models.bart'].extend(['TFBartForConditionalGeneration', 'TFBartForSequenceClassification', 'TFBartModel', 'TFBartPretrainedModel']) _import_structure['models.bert'].extend(['TFBertForMaskedLM', 'TFBertForMultipleChoice', 'TFBertForNextSentencePrediction', 'TFBertForPreTraining', 'TFBertForQuestionAnswering', 'TFBertForSequenceClassification', 'TFBertForTokenClassification', 'TFBertLMHeadModel', 'TFBertMainLayer', 'TFBertModel', 'TFBertPreTrainedModel']) _import_structure['models.blenderbot'].extend(['TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel']) _import_structure['models.blenderbot_small'].extend(['TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel']) _import_structure['models.blip'].extend(['TFBlipForConditionalGeneration', 'TFBlipForImageTextRetrieval', 'TFBlipForQuestionAnswering', 'TFBlipModel', 'TFBlipPreTrainedModel', 'TFBlipTextModel', 'TFBlipVisionModel']) _import_structure['models.camembert'].extend(['TFCamembertForCausalLM', 'TFCamembertForMaskedLM', 'TFCamembertForMultipleChoice', 'TFCamembertForQuestionAnswering', 'TFCamembertForSequenceClassification', 'TFCamembertForTokenClassification', 'TFCamembertModel', 'TFCamembertPreTrainedModel']) _import_structure['models.clip'].extend(['TFCLIPModel', 'TFCLIPPreTrainedModel', 'TFCLIPTextModel', 'TFCLIPVisionModel']) _import_structure['models.convbert'].extend(['TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertModel', 'TFConvBertPreTrainedModel']) _import_structure['models.convnext'].extend(['TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel']) _import_structure['models.convnextv2'].extend(['TFConvNextV2ForImageClassification', 'TFConvNextV2Model', 'TFConvNextV2PreTrainedModel']) _import_structure['models.ctrl'].extend(['TFCTRLForSequenceClassification', 'TFCTRLLMHeadModel', 'TFCTRLModel', 'TFCTRLPreTrainedModel']) _import_structure['models.cvt'].extend(['TFCvtForImageClassification', 'TFCvtModel', 'TFCvtPreTrainedModel']) _import_structure['models.data2vec'].extend(['TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel']) _import_structure['models.deberta'].extend(['TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel']) _import_structure['models.deberta_v2'].extend(['TFDebertaV2ForMaskedLM', 'TFDebertaV2ForMultipleChoice', 'TFDebertaV2ForQuestionAnswering', 'TFDebertaV2ForSequenceClassification', 'TFDebertaV2ForTokenClassification', 'TFDebertaV2Model', 'TFDebertaV2PreTrainedModel']) _import_structure['models.deit'].extend(['TFDeiTForImageClassification', 
'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel']) _import_structure['models.deprecated.efficientformer'].extend(['TFEfficientFormerForImageClassification', 'TFEfficientFormerForImageClassificationWithTeacher', 'TFEfficientFormerModel', 'TFEfficientFormerPreTrainedModel']) _import_structure['models.deprecated.transfo_xl'].extend(['TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel']) _import_structure['models.distilbert'].extend(['TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel']) _import_structure['models.dpr'].extend(['TFDPRContextEncoder', 'TFDPRPretrainedContextEncoder', 'TFDPRPretrainedQuestionEncoder', 'TFDPRPretrainedReader', 'TFDPRQuestionEncoder', 'TFDPRReader']) _import_structure['models.electra'].extend(['TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel']) _import_structure['models.encoder_decoder'].append('TFEncoderDecoderModel') _import_structure['models.esm'].extend(['TFEsmForMaskedLM', 'TFEsmForSequenceClassification', 'TFEsmForTokenClassification', 'TFEsmModel', 'TFEsmPreTrainedModel']) _import_structure['models.flaubert'].extend(['TFFlaubertForMultipleChoice', 'TFFlaubertForQuestionAnsweringSimple', 'TFFlaubertForSequenceClassification', 'TFFlaubertForTokenClassification', 'TFFlaubertModel', 'TFFlaubertPreTrainedModel', 'TFFlaubertWithLMHeadModel']) _import_structure['models.funnel'].extend(['TFFunnelBaseModel', 'TFFunnelForMaskedLM', 'TFFunnelForMultipleChoice', 'TFFunnelForPreTraining', 'TFFunnelForQuestionAnswering', 'TFFunnelForSequenceClassification', 'TFFunnelForTokenClassification', 'TFFunnelModel', 'TFFunnelPreTrainedModel']) _import_structure['models.gpt2'].extend(['TFGPT2DoubleHeadsModel', 'TFGPT2ForSequenceClassification', 'TFGPT2LMHeadModel', 'TFGPT2MainLayer', 'TFGPT2Model', 'TFGPT2PreTrainedModel']) _import_structure['models.gptj'].extend(['TFGPTJForCausalLM', 'TFGPTJForQuestionAnswering', 'TFGPTJForSequenceClassification', 'TFGPTJModel', 'TFGPTJPreTrainedModel']) _import_structure['models.groupvit'].extend(['TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel']) _import_structure['models.hubert'].extend(['TFHubertForCTC', 'TFHubertModel', 'TFHubertPreTrainedModel']) _import_structure['models.idefics'].extend(['TFIdeficsForVisionText2Text', 'TFIdeficsModel', 'TFIdeficsPreTrainedModel']) _import_structure['models.layoutlm'].extend(['TFLayoutLMForMaskedLM', 'TFLayoutLMForQuestionAnswering', 'TFLayoutLMForSequenceClassification', 'TFLayoutLMForTokenClassification', 'TFLayoutLMMainLayer', 'TFLayoutLMModel', 'TFLayoutLMPreTrainedModel']) _import_structure['models.layoutlmv3'].extend(['TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel']) _import_structure['models.led'].extend(['TFLEDForConditionalGeneration', 'TFLEDModel', 'TFLEDPreTrainedModel']) _import_structure['models.longformer'].extend(['TFLongformerForMaskedLM', 'TFLongformerForMultipleChoice', 
'TFLongformerForQuestionAnswering', 'TFLongformerForSequenceClassification', 'TFLongformerForTokenClassification', 'TFLongformerModel', 'TFLongformerPreTrainedModel']) _import_structure['models.lxmert'].extend(['TFLxmertForPreTraining', 'TFLxmertMainLayer', 'TFLxmertModel', 'TFLxmertPreTrainedModel', 'TFLxmertVisualFeatureEncoder']) _import_structure['models.marian'].extend(['TFMarianModel', 'TFMarianMTModel', 'TFMarianPreTrainedModel']) _import_structure['models.mbart'].extend(['TFMBartForConditionalGeneration', 'TFMBartModel', 'TFMBartPreTrainedModel']) _import_structure['models.mistral'].extend(['TFMistralForCausalLM', 'TFMistralForSequenceClassification', 'TFMistralModel', 'TFMistralPreTrainedModel']) _import_structure['models.mobilebert'].extend(['TFMobileBertForMaskedLM', 'TFMobileBertForMultipleChoice', 'TFMobileBertForNextSentencePrediction', 'TFMobileBertForPreTraining', 'TFMobileBertForQuestionAnswering', 'TFMobileBertForSequenceClassification', 'TFMobileBertForTokenClassification', 'TFMobileBertMainLayer', 'TFMobileBertModel', 'TFMobileBertPreTrainedModel']) _import_structure['models.mobilevit'].extend(['TFMobileViTForImageClassification', 'TFMobileViTForSemanticSegmentation', 'TFMobileViTModel', 'TFMobileViTPreTrainedModel']) _import_structure['models.mpnet'].extend(['TFMPNetForMaskedLM', 'TFMPNetForMultipleChoice', 'TFMPNetForQuestionAnswering', 'TFMPNetForSequenceClassification', 'TFMPNetForTokenClassification', 'TFMPNetMainLayer', 'TFMPNetModel', 'TFMPNetPreTrainedModel']) _import_structure['models.mt5'].extend(['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']) _import_structure['models.openai'].extend(['TFOpenAIGPTDoubleHeadsModel', 'TFOpenAIGPTForSequenceClassification', 'TFOpenAIGPTLMHeadModel', 'TFOpenAIGPTMainLayer', 'TFOpenAIGPTModel', 'TFOpenAIGPTPreTrainedModel']) _import_structure['models.opt'].extend(['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']) _import_structure['models.pegasus'].extend(['TFPegasusForConditionalGeneration', 'TFPegasusModel', 'TFPegasusPreTrainedModel']) _import_structure['models.rag'].extend(['TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration']) _import_structure['models.regnet'].extend(['TFRegNetForImageClassification', 'TFRegNetModel', 'TFRegNetPreTrainedModel']) _import_structure['models.rembert'].extend(['TFRemBertForCausalLM', 'TFRemBertForMaskedLM', 'TFRemBertForMultipleChoice', 'TFRemBertForQuestionAnswering', 'TFRemBertForSequenceClassification', 'TFRemBertForTokenClassification', 'TFRemBertModel', 'TFRemBertPreTrainedModel']) _import_structure['models.resnet'].extend(['TFResNetForImageClassification', 'TFResNetModel', 'TFResNetPreTrainedModel']) _import_structure['models.roberta'].extend(['TFRobertaForCausalLM', 'TFRobertaForMaskedLM', 'TFRobertaForMultipleChoice', 'TFRobertaForQuestionAnswering', 'TFRobertaForSequenceClassification', 'TFRobertaForTokenClassification', 'TFRobertaMainLayer', 'TFRobertaModel', 'TFRobertaPreTrainedModel']) _import_structure['models.roberta_prelayernorm'].extend(['TFRobertaPreLayerNormForCausalLM', 'TFRobertaPreLayerNormForMaskedLM', 'TFRobertaPreLayerNormForMultipleChoice', 'TFRobertaPreLayerNormForQuestionAnswering', 'TFRobertaPreLayerNormForSequenceClassification', 'TFRobertaPreLayerNormForTokenClassification', 'TFRobertaPreLayerNormMainLayer', 'TFRobertaPreLayerNormModel', 'TFRobertaPreLayerNormPreTrainedModel']) _import_structure['models.roformer'].extend(['TFRoFormerForCausalLM', 'TFRoFormerForMaskedLM', 
'TFRoFormerForMultipleChoice', 'TFRoFormerForQuestionAnswering', 'TFRoFormerForSequenceClassification', 'TFRoFormerForTokenClassification', 'TFRoFormerModel', 'TFRoFormerPreTrainedModel']) _import_structure['models.sam'].extend(['TFSamModel', 'TFSamPreTrainedModel']) _import_structure['models.segformer'].extend(['TFSegformerDecodeHead', 'TFSegformerForImageClassification', 'TFSegformerForSemanticSegmentation', 'TFSegformerModel', 'TFSegformerPreTrainedModel']) _import_structure['models.speech_to_text'].extend(['TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel']) _import_structure['models.swiftformer'].extend(['TFSwiftFormerForImageClassification', 'TFSwiftFormerModel', 'TFSwiftFormerPreTrainedModel']) _import_structure['models.swin'].extend(['TFSwinForImageClassification', 'TFSwinForMaskedImageModeling', 'TFSwinModel', 'TFSwinPreTrainedModel']) _import_structure['models.t5'].extend(['TFT5EncoderModel', 'TFT5ForConditionalGeneration', 'TFT5Model', 'TFT5PreTrainedModel']) _import_structure['models.tapas'].extend(['TFTapasForMaskedLM', 'TFTapasForQuestionAnswering', 'TFTapasForSequenceClassification', 'TFTapasModel', 'TFTapasPreTrainedModel']) _import_structure['models.vision_encoder_decoder'].extend(['TFVisionEncoderDecoderModel']) _import_structure['models.vision_text_dual_encoder'].extend(['TFVisionTextDualEncoderModel']) _import_structure['models.vit'].extend(['TFViTForImageClassification', 'TFViTModel', 'TFViTPreTrainedModel']) _import_structure['models.vit_mae'].extend(['TFViTMAEForPreTraining', 'TFViTMAEModel', 'TFViTMAEPreTrainedModel']) _import_structure['models.wav2vec2'].extend(['TFWav2Vec2ForCTC', 'TFWav2Vec2ForSequenceClassification', 'TFWav2Vec2Model', 'TFWav2Vec2PreTrainedModel']) _import_structure['models.whisper'].extend(['TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel']) _import_structure['models.xglm'].extend(['TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel']) _import_structure['models.xlm'].extend(['TFXLMForMultipleChoice', 'TFXLMForQuestionAnsweringSimple', 'TFXLMForSequenceClassification', 'TFXLMForTokenClassification', 'TFXLMMainLayer', 'TFXLMModel', 'TFXLMPreTrainedModel', 'TFXLMWithLMHeadModel']) _import_structure['models.xlm_roberta'].extend(['TFXLMRobertaForCausalLM', 'TFXLMRobertaForMaskedLM', 'TFXLMRobertaForMultipleChoice', 'TFXLMRobertaForQuestionAnswering', 'TFXLMRobertaForSequenceClassification', 'TFXLMRobertaForTokenClassification', 'TFXLMRobertaModel', 'TFXLMRobertaPreTrainedModel']) _import_structure['models.xlnet'].extend(['TFXLNetForMultipleChoice', 'TFXLNetForQuestionAnsweringSimple', 'TFXLNetForSequenceClassification', 'TFXLNetForTokenClassification', 'TFXLNetLMHeadModel', 'TFXLNetMainLayer', 'TFXLNetModel', 'TFXLNetPreTrainedModel']) _import_structure['optimization_tf'] = ['AdamWeightDecay', 'GradientAccumulator', 'WarmUp', 'create_optimizer'] _import_structure['tf_utils'] = [] try: if not (is_librosa_available() and is_essentia_available() and is_scipy_available() and is_torch_available() and is_pretty_midi_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects _import_structure['utils.dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects'] = [name for name in dir(dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects) if not name.startswith('_')] else: 
_import_structure['models.pop2piano'].append('Pop2PianoFeatureExtractor') _import_structure['models.pop2piano'].append('Pop2PianoTokenizer') _import_structure['models.pop2piano'].append('Pop2PianoProcessor') try: if not is_torchaudio_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_torchaudio_objects _import_structure['utils.dummy_torchaudio_objects'] = [name for name in dir(dummy_torchaudio_objects) if not name.startswith('_')] else: _import_structure['models.musicgen_melody'].append('MusicgenMelodyFeatureExtractor') _import_structure['models.musicgen_melody'].append('MusicgenMelodyProcessor') try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils import dummy_flax_objects _import_structure['utils.dummy_flax_objects'] = [name for name in dir(dummy_flax_objects) if not name.startswith('_')] else: _import_structure['generation'].extend(['FlaxForcedBOSTokenLogitsProcessor', 'FlaxForcedEOSTokenLogitsProcessor', 'FlaxForceTokensLogitsProcessor', 'FlaxGenerationMixin', 'FlaxLogitsProcessor', 'FlaxLogitsProcessorList', 'FlaxLogitsWarper', 'FlaxMinLengthLogitsProcessor', 'FlaxTemperatureLogitsWarper', 'FlaxSuppressTokensAtBeginLogitsProcessor', 'FlaxSuppressTokensLogitsProcessor', 'FlaxTopKLogitsWarper', 'FlaxTopPLogitsWarper', 'FlaxWhisperTimeStampLogitsProcessor']) _import_structure['modeling_flax_outputs'] = [] _import_structure['modeling_flax_utils'] = ['FlaxPreTrainedModel'] _import_structure['models.albert'].extend(['FlaxAlbertForMaskedLM', 'FlaxAlbertForMultipleChoice', 'FlaxAlbertForPreTraining', 'FlaxAlbertForQuestionAnswering', 'FlaxAlbertForSequenceClassification', 'FlaxAlbertForTokenClassification', 'FlaxAlbertModel', 'FlaxAlbertPreTrainedModel']) _import_structure['models.auto'].extend(['FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING', 'FLAX_MODEL_FOR_CAUSAL_LM_MAPPING', 'FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING', 'FLAX_MODEL_FOR_MASKED_LM_MAPPING', 'FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING', 'FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING', 'FLAX_MODEL_FOR_PRETRAINING_MAPPING', 'FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING', 'FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING', 'FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING', 'FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING', 'FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING', 'FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING', 'FLAX_MODEL_MAPPING', 'FlaxAutoModel', 'FlaxAutoModelForCausalLM', 'FlaxAutoModelForImageClassification', 'FlaxAutoModelForMaskedLM', 'FlaxAutoModelForMultipleChoice', 'FlaxAutoModelForNextSentencePrediction', 'FlaxAutoModelForPreTraining', 'FlaxAutoModelForQuestionAnswering', 'FlaxAutoModelForSeq2SeqLM', 'FlaxAutoModelForSequenceClassification', 'FlaxAutoModelForSpeechSeq2Seq', 'FlaxAutoModelForTokenClassification', 'FlaxAutoModelForVision2Seq']) _import_structure['models.bart'].extend(['FlaxBartDecoderPreTrainedModel', 'FlaxBartForCausalLM', 'FlaxBartForConditionalGeneration', 'FlaxBartForQuestionAnswering', 'FlaxBartForSequenceClassification', 'FlaxBartModel', 'FlaxBartPreTrainedModel']) _import_structure['models.beit'].extend(['FlaxBeitForImageClassification', 'FlaxBeitForMaskedImageModeling', 'FlaxBeitModel', 'FlaxBeitPreTrainedModel']) _import_structure['models.bert'].extend(['FlaxBertForCausalLM', 'FlaxBertForMaskedLM', 'FlaxBertForMultipleChoice', 'FlaxBertForNextSentencePrediction', 'FlaxBertForPreTraining', 'FlaxBertForQuestionAnswering', 'FlaxBertForSequenceClassification', 
'FlaxBertForTokenClassification', 'FlaxBertModel', 'FlaxBertPreTrainedModel']) _import_structure['models.big_bird'].extend(['FlaxBigBirdForCausalLM', 'FlaxBigBirdForMaskedLM', 'FlaxBigBirdForMultipleChoice', 'FlaxBigBirdForPreTraining', 'FlaxBigBirdForQuestionAnswering', 'FlaxBigBirdForSequenceClassification', 'FlaxBigBirdForTokenClassification', 'FlaxBigBirdModel', 'FlaxBigBirdPreTrainedModel']) _import_structure['models.blenderbot'].extend(['FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel']) _import_structure['models.blenderbot_small'].extend(['FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel']) _import_structure['models.bloom'].extend(['FlaxBloomForCausalLM', 'FlaxBloomModel', 'FlaxBloomPreTrainedModel']) _import_structure['models.clip'].extend(['FlaxCLIPModel', 'FlaxCLIPPreTrainedModel', 'FlaxCLIPTextModel', 'FlaxCLIPTextPreTrainedModel', 'FlaxCLIPTextModelWithProjection', 'FlaxCLIPVisionModel', 'FlaxCLIPVisionPreTrainedModel']) _import_structure['models.dinov2'].extend(['FlaxDinov2Model', 'FlaxDinov2ForImageClassification', 'FlaxDinov2PreTrainedModel']) _import_structure['models.distilbert'].extend(['FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel']) _import_structure['models.electra'].extend(['FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel']) _import_structure['models.encoder_decoder'].append('FlaxEncoderDecoderModel') _import_structure['models.gpt2'].extend(['FlaxGPT2LMHeadModel', 'FlaxGPT2Model', 'FlaxGPT2PreTrainedModel']) _import_structure['models.gpt_neo'].extend(['FlaxGPTNeoForCausalLM', 'FlaxGPTNeoModel', 'FlaxGPTNeoPreTrainedModel']) _import_structure['models.gptj'].extend(['FlaxGPTJForCausalLM', 'FlaxGPTJModel', 'FlaxGPTJPreTrainedModel']) _import_structure['models.llama'].extend(['FlaxLlamaForCausalLM', 'FlaxLlamaModel', 'FlaxLlamaPreTrainedModel']) _import_structure['models.gemma'].extend(['FlaxGemmaForCausalLM', 'FlaxGemmaModel', 'FlaxGemmaPreTrainedModel']) _import_structure['models.longt5'].extend(['FlaxLongT5ForConditionalGeneration', 'FlaxLongT5Model', 'FlaxLongT5PreTrainedModel']) _import_structure['models.marian'].extend(['FlaxMarianModel', 'FlaxMarianMTModel', 'FlaxMarianPreTrainedModel']) _import_structure['models.mbart'].extend(['FlaxMBartForConditionalGeneration', 'FlaxMBartForQuestionAnswering', 'FlaxMBartForSequenceClassification', 'FlaxMBartModel', 'FlaxMBartPreTrainedModel']) _import_structure['models.mistral'].extend(['FlaxMistralForCausalLM', 'FlaxMistralModel', 'FlaxMistralPreTrainedModel']) _import_structure['models.mt5'].extend(['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']) _import_structure['models.opt'].extend(['FlaxOPTForCausalLM', 'FlaxOPTModel', 'FlaxOPTPreTrainedModel']) _import_structure['models.pegasus'].extend(['FlaxPegasusForConditionalGeneration', 'FlaxPegasusModel', 'FlaxPegasusPreTrainedModel']) _import_structure['models.regnet'].extend(['FlaxRegNetForImageClassification', 'FlaxRegNetModel', 'FlaxRegNetPreTrainedModel']) _import_structure['models.resnet'].extend(['FlaxResNetForImageClassification', 
'FlaxResNetModel', 'FlaxResNetPreTrainedModel']) _import_structure['models.roberta'].extend(['FlaxRobertaForCausalLM', 'FlaxRobertaForMaskedLM', 'FlaxRobertaForMultipleChoice', 'FlaxRobertaForQuestionAnswering', 'FlaxRobertaForSequenceClassification', 'FlaxRobertaForTokenClassification', 'FlaxRobertaModel', 'FlaxRobertaPreTrainedModel']) _import_structure['models.roberta_prelayernorm'].extend(['FlaxRobertaPreLayerNormForCausalLM', 'FlaxRobertaPreLayerNormForMaskedLM', 'FlaxRobertaPreLayerNormForMultipleChoice', 'FlaxRobertaPreLayerNormForQuestionAnswering', 'FlaxRobertaPreLayerNormForSequenceClassification', 'FlaxRobertaPreLayerNormForTokenClassification', 'FlaxRobertaPreLayerNormModel', 'FlaxRobertaPreLayerNormPreTrainedModel']) _import_structure['models.roformer'].extend(['FlaxRoFormerForMaskedLM', 'FlaxRoFormerForMultipleChoice', 'FlaxRoFormerForQuestionAnswering', 'FlaxRoFormerForSequenceClassification', 'FlaxRoFormerForTokenClassification', 'FlaxRoFormerModel', 'FlaxRoFormerPreTrainedModel']) _import_structure['models.speech_encoder_decoder'].append('FlaxSpeechEncoderDecoderModel') _import_structure['models.t5'].extend(['FlaxT5EncoderModel', 'FlaxT5ForConditionalGeneration', 'FlaxT5Model', 'FlaxT5PreTrainedModel']) _import_structure['models.vision_encoder_decoder'].append('FlaxVisionEncoderDecoderModel') _import_structure['models.vision_text_dual_encoder'].extend(['FlaxVisionTextDualEncoderModel']) _import_structure['models.vit'].extend(['FlaxViTForImageClassification', 'FlaxViTModel', 'FlaxViTPreTrainedModel']) _import_structure['models.wav2vec2'].extend(['FlaxWav2Vec2ForCTC', 'FlaxWav2Vec2ForPreTraining', 'FlaxWav2Vec2Model', 'FlaxWav2Vec2PreTrainedModel']) _import_structure['models.whisper'].extend(['FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification']) _import_structure['models.xglm'].extend(['FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel']) _import_structure['models.xlm_roberta'].extend(['FlaxXLMRobertaForMaskedLM', 'FlaxXLMRobertaForMultipleChoice', 'FlaxXLMRobertaForQuestionAnswering', 'FlaxXLMRobertaForSequenceClassification', 'FlaxXLMRobertaForTokenClassification', 'FlaxXLMRobertaModel', 'FlaxXLMRobertaForCausalLM', 'FlaxXLMRobertaPreTrainedModel']) if TYPE_CHECKING: from .agents import Agent, CodeAgent, HfApiEngine, ManagedAgent, PipelineTool, ReactAgent, ReactCodeAgent, ReactJsonAgent, Tool, Toolbox, ToolCollection, TransformersEngine, launch_gradio_demo, load_tool, stream_to_gradio from .configuration_utils import PretrainedConfig from .data import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, glue_compute_metrics, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_compute_metrics, xnli_output_modes, xnli_processors, xnli_tasks_num_labels from .data.data_collator import DataCollator, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeq2Seq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithFlattening, DataCollatorWithPadding, DefaultDataCollator, default_data_collator from .feature_extraction_sequence_utils import SequenceFeatureExtractor from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .generation import GenerationConfig, TextIteratorStreamer, TextStreamer, WatermarkingConfig 
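# Note: this `if TYPE_CHECKING:` section mirrors the `_import_structure` entries above so that
# static type checkers and IDEs resolve real symbols, while at runtime attribute access is served
# lazily from `_import_structure` instead. As a minimal sketch (not shown in this excerpt), such an
# __init__ module conventionally ends by installing a lazy module object; the `_LazyModule` helper
# from `transformers.utils` and the availability of `__spec__` / `__version__` are assumptions here:
#
#     import sys
#     from .utils import _LazyModule
#
#     sys.modules[__name__] = _LazyModule(
#         __name__,
#         globals()["__file__"],
#         _import_structure,
#         module_spec=__spec__,
#         extra_objects={"__version__": __version__},
#     )
#
# With that tail in place, `from transformers import AutoModel` only imports the submodule that
# actually defines `AutoModel`, which keeps a bare `import transformers` cheap.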
from .hf_argparser import HfArgumentParser from .integrations import is_clearml_available, is_comet_available, is_dvclive_available, is_neptune_available, is_optuna_available, is_ray_available, is_ray_tune_available, is_sigopt_available, is_tensorboard_available, is_wandb_available from .modelcard import ModelCard from .modeling_tf_pytorch_utils import convert_tf_weight_name_to_pt_weight_name, load_pytorch_checkpoint_in_tf2_model, load_pytorch_model_in_tf2_model, load_pytorch_weights_in_tf2_model, load_tf2_checkpoint_in_pytorch_model, load_tf2_model_in_pytorch_model, load_tf2_weights_in_pytorch_model from .models.albert import AlbertConfig from .models.align import AlignConfig, AlignProcessor, AlignTextConfig, AlignVisionConfig from .models.altclip import AltCLIPConfig, AltCLIPProcessor, AltCLIPTextConfig, AltCLIPVisionConfig from .models.audio_spectrogram_transformer import ASTConfig, ASTFeatureExtractor from .models.auto import CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_NAMES_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoImageProcessor, AutoProcessor, AutoTokenizer from .models.autoformer import AutoformerConfig from .models.bark import BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkProcessor, BarkSemanticConfig from .models.bart import BartConfig, BartTokenizer from .models.beit import BeitConfig from .models.bert import BasicTokenizer, BertConfig, BertTokenizer, WordpieceTokenizer from .models.bert_generation import BertGenerationConfig from .models.bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer from .models.bertweet import BertweetTokenizer from .models.big_bird import BigBirdConfig from .models.bigbird_pegasus import BigBirdPegasusConfig from .models.biogpt import BioGptConfig, BioGptTokenizer from .models.bit import BitConfig from .models.blenderbot import BlenderbotConfig, BlenderbotTokenizer from .models.blenderbot_small import BlenderbotSmallConfig, BlenderbotSmallTokenizer from .models.blip import BlipConfig, BlipProcessor, BlipTextConfig, BlipVisionConfig from .models.blip_2 import Blip2Config, Blip2Processor, Blip2QFormerConfig, Blip2VisionConfig from .models.bloom import BloomConfig from .models.bridgetower import BridgeTowerConfig, BridgeTowerProcessor, BridgeTowerTextConfig, BridgeTowerVisionConfig from .models.bros import BrosConfig, BrosProcessor from .models.byt5 import ByT5Tokenizer from .models.camembert import CamembertConfig from .models.canine import CanineConfig, CanineTokenizer from .models.chameleon import ChameleonConfig, ChameleonProcessor, ChameleonVQVAEConfig from .models.chinese_clip import ChineseCLIPConfig, ChineseCLIPProcessor, ChineseCLIPTextConfig, ChineseCLIPVisionConfig from .models.clap import ClapAudioConfig, ClapConfig, ClapProcessor, ClapTextConfig from .models.clip import CLIPConfig, CLIPProcessor, CLIPTextConfig, CLIPTokenizer, CLIPVisionConfig from .models.clipseg import CLIPSegConfig, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig from .models.clvp import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig, ClvpFeatureExtractor, ClvpProcessor, ClvpTokenizer from .models.codegen import CodeGenConfig, CodeGenTokenizer from .models.cohere import CohereConfig from .models.conditional_detr import ConditionalDetrConfig from .models.convbert import ConvBertConfig, ConvBertTokenizer from .models.convnext import ConvNextConfig from .models.convnextv2 import ConvNextV2Config from .models.cpmant import CpmAntConfig, CpmAntTokenizer from 
.models.ctrl import CTRLConfig, CTRLTokenizer from .models.cvt import CvtConfig from .models.dac import DacConfig, DacFeatureExtractor from .models.data2vec import Data2VecAudioConfig, Data2VecTextConfig, Data2VecVisionConfig from .models.dbrx import DbrxConfig from .models.deberta import DebertaConfig, DebertaTokenizer from .models.deberta_v2 import DebertaV2Config from .models.decision_transformer import DecisionTransformerConfig from .models.deformable_detr import DeformableDetrConfig from .models.deit import DeiTConfig from .models.deprecated.deta import DetaConfig from .models.deprecated.efficientformer import EfficientFormerConfig from .models.deprecated.ernie_m import ErnieMConfig from .models.deprecated.gptsan_japanese import GPTSanJapaneseConfig, GPTSanJapaneseTokenizer from .models.deprecated.graphormer import GraphormerConfig from .models.deprecated.jukebox import JukeboxConfig, JukeboxPriorConfig, JukeboxTokenizer, JukeboxVQVAEConfig from .models.deprecated.mctct import MCTCTConfig, MCTCTFeatureExtractor, MCTCTProcessor from .models.deprecated.mega import MegaConfig from .models.deprecated.mmbt import MMBTConfig from .models.deprecated.nat import NatConfig from .models.deprecated.nezha import NezhaConfig from .models.deprecated.open_llama import OpenLlamaConfig from .models.deprecated.qdqbert import QDQBertConfig from .models.deprecated.realm import RealmConfig, RealmTokenizer from .models.deprecated.retribert import RetriBertConfig, RetriBertTokenizer from .models.deprecated.speech_to_text_2 import Speech2Text2Config, Speech2Text2Processor, Speech2Text2Tokenizer from .models.deprecated.tapex import TapexTokenizer from .models.deprecated.trajectory_transformer import TrajectoryTransformerConfig from .models.deprecated.transfo_xl import TransfoXLConfig, TransfoXLCorpus, TransfoXLTokenizer from .models.deprecated.tvlt import TvltConfig, TvltFeatureExtractor, TvltProcessor from .models.deprecated.van import VanConfig from .models.deprecated.vit_hybrid import ViTHybridConfig from .models.deprecated.xlm_prophetnet import XLMProphetNetConfig from .models.depth_anything import DepthAnythingConfig from .models.detr import DetrConfig from .models.dinat import DinatConfig from .models.dinov2 import Dinov2Config from .models.distilbert import DistilBertConfig, DistilBertTokenizer from .models.donut import DonutProcessor, DonutSwinConfig from .models.dpr import DPRConfig, DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderOutput, DPRReaderTokenizer from .models.dpt import DPTConfig from .models.efficientnet import EfficientNetConfig from .models.electra import ElectraConfig, ElectraTokenizer from .models.encodec import EncodecConfig, EncodecFeatureExtractor from .models.encoder_decoder import EncoderDecoderConfig from .models.ernie import ErnieConfig from .models.esm import EsmConfig, EsmTokenizer from .models.falcon import FalconConfig from .models.falcon_mamba import FalconMambaConfig from .models.fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerTokenizer, FastSpeech2ConformerWithHifiGanConfig from .models.flaubert import FlaubertConfig, FlaubertTokenizer from .models.flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig from .models.fnet import FNetConfig from .models.focalnet import FocalNetConfig from .models.fsmt import FSMTConfig, FSMTTokenizer from .models.funnel import FunnelConfig, FunnelTokenizer from .models.fuyu import FuyuConfig from 
.models.gemma import GemmaConfig from .models.gemma2 import Gemma2Config from .models.git import GitConfig, GitProcessor, GitVisionConfig from .models.glpn import GLPNConfig from .models.gpt2 import GPT2Config, GPT2Tokenizer from .models.gpt_bigcode import GPTBigCodeConfig from .models.gpt_neo import GPTNeoConfig from .models.gpt_neox import GPTNeoXConfig from .models.gpt_neox_japanese import GPTNeoXJapaneseConfig from .models.gptj import GPTJConfig from .models.granite import GraniteConfig from .models.grounding_dino import GroundingDinoConfig, GroundingDinoProcessor from .models.groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig from .models.herbert import HerbertTokenizer from .models.hiera import HieraConfig from .models.hubert import HubertConfig from .models.ibert import IBertConfig from .models.idefics import IdeficsConfig from .models.idefics2 import Idefics2Config from .models.imagegpt import ImageGPTConfig from .models.informer import InformerConfig from .models.instructblip import InstructBlipConfig, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig from .models.instructblipvideo import InstructBlipVideoConfig, InstructBlipVideoProcessor, InstructBlipVideoQFormerConfig, InstructBlipVideoVisionConfig from .models.jamba import JambaConfig from .models.jetmoe import JetMoeConfig from .models.kosmos2 import Kosmos2Config, Kosmos2Processor from .models.layoutlm import LayoutLMConfig, LayoutLMTokenizer from .models.layoutlmv2 import LayoutLMv2Config, LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor, LayoutLMv2Processor, LayoutLMv2Tokenizer from .models.layoutlmv3 import LayoutLMv3Config, LayoutLMv3FeatureExtractor, LayoutLMv3ImageProcessor, LayoutLMv3Processor, LayoutLMv3Tokenizer from .models.layoutxlm import LayoutXLMProcessor from .models.led import LEDConfig, LEDTokenizer from .models.levit import LevitConfig from .models.lilt import LiltConfig from .models.llama import LlamaConfig from .models.llava import LlavaConfig, LlavaProcessor from .models.llava_next import LlavaNextConfig, LlavaNextProcessor from .models.llava_next_video import LlavaNextVideoConfig, LlavaNextVideoProcessor from .models.llava_onevision import LlavaOnevisionConfig, LlavaOnevisionProcessor from .models.longformer import LongformerConfig, LongformerTokenizer from .models.longt5 import LongT5Config from .models.luke import LukeConfig, LukeTokenizer from .models.lxmert import LxmertConfig, LxmertTokenizer from .models.m2m_100 import M2M100Config from .models.mamba import MambaConfig from .models.mamba2 import Mamba2Config from .models.marian import MarianConfig from .models.markuplm import MarkupLMConfig, MarkupLMFeatureExtractor, MarkupLMProcessor, MarkupLMTokenizer from .models.mask2former import Mask2FormerConfig from .models.maskformer import MaskFormerConfig, MaskFormerSwinConfig from .models.mbart import MBartConfig from .models.megatron_bert import MegatronBertConfig from .models.mgp_str import MgpstrConfig, MgpstrProcessor, MgpstrTokenizer from .models.mistral import MistralConfig from .models.mixtral import MixtralConfig from .models.mobilebert import MobileBertConfig, MobileBertTokenizer from .models.mobilenet_v1 import MobileNetV1Config from .models.mobilenet_v2 import MobileNetV2Config from .models.mobilevit import MobileViTConfig from .models.mobilevitv2 import MobileViTV2Config from .models.mpnet import MPNetConfig, MPNetTokenizer from .models.mpt import MptConfig from .models.mra import MraConfig from .models.mt5 import MT5Config from 
.models.musicgen import MusicgenConfig, MusicgenDecoderConfig from .models.musicgen_melody import MusicgenMelodyConfig, MusicgenMelodyDecoderConfig from .models.mvp import MvpConfig, MvpTokenizer from .models.nemotron import NemotronConfig from .models.nllb_moe import NllbMoeConfig from .models.nougat import NougatProcessor from .models.nystromformer import NystromformerConfig from .models.olmo import OlmoConfig from .models.olmoe import OlmoeConfig from .models.oneformer import OneFormerConfig, OneFormerProcessor from .models.openai import OpenAIGPTConfig, OpenAIGPTTokenizer from .models.opt import OPTConfig from .models.owlv2 import Owlv2Config, Owlv2Processor, Owlv2TextConfig, Owlv2VisionConfig from .models.owlvit import OwlViTConfig, OwlViTProcessor, OwlViTTextConfig, OwlViTVisionConfig from .models.paligemma import PaliGemmaConfig from .models.patchtsmixer import PatchTSMixerConfig from .models.patchtst import PatchTSTConfig from .models.pegasus import PegasusConfig, PegasusTokenizer from .models.pegasus_x import PegasusXConfig from .models.perceiver import PerceiverConfig, PerceiverTokenizer from .models.persimmon import PersimmonConfig from .models.phi import PhiConfig from .models.phi3 import Phi3Config from .models.phobert import PhobertTokenizer from .models.pix2struct import Pix2StructConfig, Pix2StructProcessor, Pix2StructTextConfig, Pix2StructVisionConfig from .models.pixtral import PixtralProcessor, PixtralVisionConfig from .models.plbart import PLBartConfig from .models.poolformer import PoolFormerConfig from .models.pop2piano import Pop2PianoConfig from .models.prophetnet import ProphetNetConfig, ProphetNetTokenizer from .models.pvt import PvtConfig from .models.pvt_v2 import PvtV2Config from .models.qwen2 import Qwen2Config, Qwen2Tokenizer from .models.qwen2_audio import Qwen2AudioConfig, Qwen2AudioEncoderConfig, Qwen2AudioProcessor from .models.qwen2_moe import Qwen2MoeConfig from .models.qwen2_vl import Qwen2VLConfig, Qwen2VLProcessor from .models.rag import RagConfig, RagRetriever, RagTokenizer from .models.recurrent_gemma import RecurrentGemmaConfig from .models.reformer import ReformerConfig from .models.regnet import RegNetConfig from .models.rembert import RemBertConfig from .models.resnet import ResNetConfig from .models.roberta import RobertaConfig, RobertaTokenizer from .models.roberta_prelayernorm import RobertaPreLayerNormConfig from .models.roc_bert import RoCBertConfig, RoCBertTokenizer from .models.roformer import RoFormerConfig, RoFormerTokenizer from .models.rt_detr import RTDetrConfig, RTDetrResNetConfig from .models.rwkv import RwkvConfig from .models.sam import SamConfig, SamMaskDecoderConfig, SamProcessor, SamPromptEncoderConfig, SamVisionConfig from .models.seamless_m4t import SeamlessM4TConfig, SeamlessM4TFeatureExtractor, SeamlessM4TProcessor from .models.seamless_m4t_v2 import SeamlessM4Tv2Config from .models.segformer import SegformerConfig from .models.seggpt import SegGptConfig from .models.sew import SEWConfig from .models.sew_d import SEWDConfig from .models.siglip import SiglipConfig, SiglipProcessor, SiglipTextConfig, SiglipVisionConfig from .models.speech_encoder_decoder import SpeechEncoderDecoderConfig from .models.speech_to_text import Speech2TextConfig, Speech2TextFeatureExtractor, Speech2TextProcessor from .models.speecht5 import SpeechT5Config, SpeechT5FeatureExtractor, SpeechT5HifiGanConfig, SpeechT5Processor from .models.splinter import SplinterConfig, SplinterTokenizer from .models.squeezebert import SqueezeBertConfig, 
SqueezeBertTokenizer from .models.stablelm import StableLmConfig from .models.starcoder2 import Starcoder2Config from .models.superpoint import SuperPointConfig from .models.swiftformer import SwiftFormerConfig from .models.swin import SwinConfig from .models.swin2sr import Swin2SRConfig from .models.swinv2 import Swinv2Config from .models.switch_transformers import SwitchTransformersConfig from .models.t5 import T5Config from .models.table_transformer import TableTransformerConfig from .models.tapas import TapasConfig, TapasTokenizer from .models.time_series_transformer import TimeSeriesTransformerConfig from .models.timesformer import TimesformerConfig from .models.timm_backbone import TimmBackboneConfig from .models.trocr import TrOCRConfig, TrOCRProcessor from .models.tvp import TvpConfig, TvpProcessor from .models.udop import UdopConfig, UdopProcessor from .models.umt5 import UMT5Config from .models.unispeech import UniSpeechConfig from .models.unispeech_sat import UniSpeechSatConfig from .models.univnet import UnivNetConfig, UnivNetFeatureExtractor from .models.upernet import UperNetConfig from .models.video_llava import VideoLlavaConfig from .models.videomae import VideoMAEConfig from .models.vilt import ViltConfig, ViltFeatureExtractor, ViltImageProcessor, ViltProcessor from .models.vipllava import VipLlavaConfig from .models.vision_encoder_decoder import VisionEncoderDecoderConfig from .models.vision_text_dual_encoder import VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor from .models.visual_bert import VisualBertConfig from .models.vit import ViTConfig from .models.vit_mae import ViTMAEConfig from .models.vit_msn import ViTMSNConfig from .models.vitdet import VitDetConfig from .models.vitmatte import VitMatteConfig from .models.vits import VitsConfig, VitsTokenizer from .models.vivit import VivitConfig from .models.wav2vec2 import Wav2Vec2Config, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, Wav2Vec2Tokenizer from .models.wav2vec2_bert import Wav2Vec2BertConfig, Wav2Vec2BertProcessor from .models.wav2vec2_conformer import Wav2Vec2ConformerConfig from .models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer from .models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM from .models.wavlm import WavLMConfig from .models.whisper import WhisperConfig, WhisperFeatureExtractor, WhisperProcessor, WhisperTokenizer from .models.x_clip import XCLIPConfig, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig from .models.xglm import XGLMConfig from .models.xlm import XLMConfig, XLMTokenizer from .models.xlm_roberta import XLMRobertaConfig from .models.xlm_roberta_xl import XLMRobertaXLConfig from .models.xlnet import XLNetConfig from .models.xmod import XmodConfig from .models.yolos import YolosConfig from .models.yoso import YosoConfig from .models.zoedepth import ZoeDepthConfig from .pipelines import AudioClassificationPipeline, AutomaticSpeechRecognitionPipeline, CsvPipelineDataFormat, DepthEstimationPipeline, DocumentQuestionAnsweringPipeline, FeatureExtractionPipeline, FillMaskPipeline, ImageClassificationPipeline, ImageFeatureExtractionPipeline, ImageSegmentationPipeline, ImageToImagePipeline, ImageToTextPipeline, JsonPipelineDataFormat, MaskGenerationPipeline, NerPipeline, ObjectDetectionPipeline, PipedPipelineDataFormat, Pipeline, PipelineDataFormat, QuestionAnsweringPipeline, SummarizationPipeline, TableQuestionAnsweringPipeline, Text2TextGenerationPipeline, TextClassificationPipeline, TextGenerationPipeline, TextToAudioPipeline, 
TokenClassificationPipeline, TranslationPipeline, VideoClassificationPipeline, VisualQuestionAnsweringPipeline, ZeroShotAudioClassificationPipeline, ZeroShotClassificationPipeline, ZeroShotImageClassificationPipeline, ZeroShotObjectDetectionPipeline, pipeline from .processing_utils import ProcessorMixin from .tokenization_utils import PreTrainedTokenizer from .tokenization_utils_base import AddedToken, BatchEncoding, CharSpan, PreTrainedTokenizerBase, SpecialTokensMixin, TokenSpan from .trainer_callback import DefaultFlowCallback, EarlyStoppingCallback, PrinterCallback, ProgressCallback, TrainerCallback, TrainerControl, TrainerState from .trainer_utils import EvalPrediction, IntervalStrategy, SchedulerType, enable_full_determinism, set_seed from .training_args import TrainingArguments from .training_args_seq2seq import Seq2SeqTrainingArguments from .training_args_tf import TFTrainingArguments from .utils import CONFIG_NAME, MODEL_CARD_NAME, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, TensorType, add_end_docstrings, add_start_docstrings, is_apex_available, is_av_available, is_bitsandbytes_available, is_datasets_available, is_decord_available, is_faiss_available, is_flax_available, is_keras_nlp_available, is_phonemizer_available, is_psutil_available, is_py3nvml_available, is_pyctcdecode_available, is_sacremoses_available, is_safetensors_available, is_scipy_available, is_sentencepiece_available, is_sklearn_available, is_speech_available, is_tensorflow_text_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_mlu_available, is_torch_musa_available, is_torch_neuroncore_available, is_torch_npu_available, is_torch_tpu_available, is_torch_xla_available, is_torch_xpu_available, is_torchvision_available, is_vision_available, logging from .utils.quantization_config import AqlmConfig, AwqConfig, BitsAndBytesConfig, EetqConfig, FbgemmFp8Config, GPTQConfig, HqqConfig, QuantoConfig, TorchAoConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_sentencepiece_objects import * else: from .models.albert import AlbertTokenizer from .models.barthez import BarthezTokenizer from .models.bartpho import BartphoTokenizer from .models.bert_generation import BertGenerationTokenizer from .models.big_bird import BigBirdTokenizer from .models.camembert import CamembertTokenizer from .models.code_llama import CodeLlamaTokenizer from .models.cpm import CpmTokenizer from .models.deberta_v2 import DebertaV2Tokenizer from .models.deprecated.ernie_m import ErnieMTokenizer from .models.deprecated.xlm_prophetnet import XLMProphetNetTokenizer from .models.fnet import FNetTokenizer from .models.gemma import GemmaTokenizer from .models.gpt_sw3 import GPTSw3Tokenizer from .models.layoutxlm import LayoutXLMTokenizer from .models.llama import LlamaTokenizer from .models.m2m_100 import M2M100Tokenizer from .models.marian import MarianTokenizer from .models.mbart import MBartTokenizer from .models.mbart50 import MBart50Tokenizer from .models.mluke import MLukeTokenizer from .models.mt5 import MT5Tokenizer from .models.nllb import NllbTokenizer from .models.pegasus import PegasusTokenizer from .models.plbart import PLBartTokenizer from .models.reformer import ReformerTokenizer from .models.rembert import RemBertTokenizer from .models.seamless_m4t import SeamlessM4TTokenizer from .models.siglip import 
SiglipTokenizer from .models.speech_to_text import Speech2TextTokenizer from .models.speecht5 import SpeechT5Tokenizer from .models.t5 import T5Tokenizer from .models.udop import UdopTokenizer from .models.xglm import XGLMTokenizer from .models.xlm_roberta import XLMRobertaTokenizer from .models.xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_tokenizers_objects import * else: from .models.albert import AlbertTokenizerFast from .models.bart import BartTokenizerFast from .models.barthez import BarthezTokenizerFast from .models.bert import BertTokenizerFast from .models.big_bird import BigBirdTokenizerFast from .models.blenderbot import BlenderbotTokenizerFast from .models.blenderbot_small import BlenderbotSmallTokenizerFast from .models.bloom import BloomTokenizerFast from .models.camembert import CamembertTokenizerFast from .models.clip import CLIPTokenizerFast from .models.code_llama import CodeLlamaTokenizerFast from .models.codegen import CodeGenTokenizerFast from .models.cohere import CohereTokenizerFast from .models.convbert import ConvBertTokenizerFast from .models.cpm import CpmTokenizerFast from .models.deberta import DebertaTokenizerFast from .models.deberta_v2 import DebertaV2TokenizerFast from .models.deprecated.realm import RealmTokenizerFast from .models.deprecated.retribert import RetriBertTokenizerFast from .models.distilbert import DistilBertTokenizerFast from .models.dpr import DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizerFast, DPRReaderTokenizerFast from .models.electra import ElectraTokenizerFast from .models.fnet import FNetTokenizerFast from .models.funnel import FunnelTokenizerFast from .models.gemma import GemmaTokenizerFast from .models.gpt2 import GPT2TokenizerFast from .models.gpt_neox import GPTNeoXTokenizerFast from .models.gpt_neox_japanese import GPTNeoXJapaneseTokenizer from .models.herbert import HerbertTokenizerFast from .models.layoutlm import LayoutLMTokenizerFast from .models.layoutlmv2 import LayoutLMv2TokenizerFast from .models.layoutlmv3 import LayoutLMv3TokenizerFast from .models.layoutxlm import LayoutXLMTokenizerFast from .models.led import LEDTokenizerFast from .models.llama import LlamaTokenizerFast from .models.longformer import LongformerTokenizerFast from .models.lxmert import LxmertTokenizerFast from .models.markuplm import MarkupLMTokenizerFast from .models.mbart import MBartTokenizerFast from .models.mbart50 import MBart50TokenizerFast from .models.mobilebert import MobileBertTokenizerFast from .models.mpnet import MPNetTokenizerFast from .models.mt5 import MT5TokenizerFast from .models.mvp import MvpTokenizerFast from .models.nllb import NllbTokenizerFast from .models.nougat import NougatTokenizerFast from .models.openai import OpenAIGPTTokenizerFast from .models.pegasus import PegasusTokenizerFast from .models.qwen2 import Qwen2TokenizerFast from .models.reformer import ReformerTokenizerFast from .models.rembert import RemBertTokenizerFast from .models.roberta import RobertaTokenizerFast from .models.roformer import RoFormerTokenizerFast from .models.seamless_m4t import SeamlessM4TTokenizerFast from .models.splinter import SplinterTokenizerFast from .models.squeezebert import SqueezeBertTokenizerFast from .models.t5 import T5TokenizerFast from .models.udop import UdopTokenizerFast from .models.whisper import WhisperTokenizerFast from .models.xglm import XGLMTokenizerFast from .models.xlm_roberta import 
XLMRobertaTokenizerFast
from .models.xlnet import XLNetTokenizerFast
from .tokenization_utils_fast import PreTrainedTokenizerFast
try:
    if not (is_sentencepiece_available() and is_tokenizers_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_sentencepiece_and_tokenizers_objects import *
else:
    from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, convert_slow_tokenizer
try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_tensorflow_text_objects import *
else:
    from .models.bert import TFBertTokenizer
try:
    if not is_keras_nlp_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_keras_nlp_objects import *
else:
    from .models.gpt2 import TFGPT2Tokenizer
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_vision_objects import *
else:
    from .image_processing_base import ImageProcessingMixin
    from .image_processing_utils import BaseImageProcessor
    from .image_utils import ImageFeatureExtractionMixin
    from .models.beit import BeitFeatureExtractor, BeitImageProcessor
    from .models.bit import BitImageProcessor
    from .models.blip import BlipImageProcessor
    from .models.bridgetower import BridgeTowerImageProcessor
    from .models.chameleon import ChameleonImageProcessor
    from .models.chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
    from .models.clip import CLIPFeatureExtractor, CLIPImageProcessor
    from .models.conditional_detr import ConditionalDetrFeatureExtractor, ConditionalDetrImageProcessor
    from .models.convnext import ConvNextFeatureExtractor, ConvNextImageProcessor
    from .models.deformable_detr import DeformableDetrFeatureExtractor, DeformableDetrImageProcessor
    from .models.deit import DeiTFeatureExtractor, DeiTImageProcessor
    from .models.deprecated.deta import DetaImageProcessor
    from .models.deprecated.efficientformer import EfficientFormerImageProcessor
    from .models.deprecated.tvlt import TvltImageProcessor
    from .models.deprecated.vit_hybrid import ViTHybridImageProcessor
    from .models.detr import DetrFeatureExtractor, DetrImageProcessor
    from .models.donut import DonutFeatureExtractor, DonutImageProcessor
    from .models.dpt import DPTFeatureExtractor, DPTImageProcessor
    from .models.efficientnet import EfficientNetImageProcessor
    from .models.flava import FlavaFeatureExtractor, FlavaImageProcessor, FlavaProcessor
    from .models.fuyu import FuyuImageProcessor, FuyuProcessor
    from .models.glpn import GLPNFeatureExtractor, GLPNImageProcessor
    from .models.grounding_dino import GroundingDinoImageProcessor
    from .models.idefics import IdeficsImageProcessor
    from .models.idefics2 import Idefics2ImageProcessor
    from .models.imagegpt import ImageGPTFeatureExtractor, ImageGPTImageProcessor
    from .models.instructblipvideo import InstructBlipVideoImageProcessor
    from .models.layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
    from .models.layoutlmv3 import LayoutLMv3FeatureExtractor, LayoutLMv3ImageProcessor
    from .models.levit import LevitFeatureExtractor, LevitImageProcessor
    from .models.llava_next import LlavaNextImageProcessor
    from .models.llava_next_video import LlavaNextVideoImageProcessor
    from .models.llava_onevision import LlavaOnevisionImageProcessor, LlavaOnevisionVideoProcessor
    from .models.mask2former import Mask2FormerImageProcessor
    from .models.maskformer import MaskFormerFeatureExtractor, MaskFormerImageProcessor
    from .models.mobilenet_v1 import MobileNetV1FeatureExtractor, MobileNetV1ImageProcessor
    from .models.mobilenet_v2 import MobileNetV2FeatureExtractor, MobileNetV2ImageProcessor
    from .models.mobilevit import MobileViTFeatureExtractor, MobileViTImageProcessor
    from .models.nougat import NougatImageProcessor
    from .models.oneformer import OneFormerImageProcessor
    from .models.owlv2 import Owlv2ImageProcessor
    from .models.owlvit import OwlViTFeatureExtractor, OwlViTImageProcessor
    from .models.perceiver import PerceiverFeatureExtractor, PerceiverImageProcessor
    from .models.pix2struct import Pix2StructImageProcessor
    from .models.pixtral import PixtralImageProcessor
    from .models.poolformer import PoolFormerFeatureExtractor, PoolFormerImageProcessor
    from .models.pvt import PvtImageProcessor
    from .models.qwen2_vl import Qwen2VLImageProcessor
    from .models.rt_detr import RTDetrImageProcessor
    from .models.sam import SamImageProcessor
    from .models.segformer import SegformerFeatureExtractor, SegformerImageProcessor
    from .models.seggpt import SegGptImageProcessor
    from .models.siglip import SiglipImageProcessor
    from .models.superpoint import SuperPointImageProcessor
    from .models.swin2sr import Swin2SRImageProcessor
    from .models.tvp import TvpImageProcessor
    from .models.video_llava import VideoLlavaImageProcessor
    from .models.videomae import VideoMAEFeatureExtractor, VideoMAEImageProcessor
    from .models.vilt import ViltFeatureExtractor, ViltImageProcessor, ViltProcessor
    from .models.vit import ViTFeatureExtractor, ViTImageProcessor
    from .models.vitmatte import VitMatteImageProcessor
    from .models.vivit import VivitImageProcessor
    from .models.yolos import YolosFeatureExtractor, YolosImageProcessor
    from .models.zoedepth import ZoeDepthImageProcessor
try:
    if not is_torchvision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torchvision_objects import *
else:
    from .image_processing_utils_fast import BaseImageProcessorFast
    from .models.vit import ViTImageProcessorFast
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_pt_objects import *
else:
    from .benchmark.benchmark import PyTorchBenchmark
    from .benchmark.benchmark_args import PyTorchBenchmarkArguments
    from .cache_utils import Cache, CacheConfig, DynamicCache, EncoderDecoderCache, HQQQuantizedCache, HybridCache, MambaCache, OffloadedCache, OffloadedStaticCache, QuantizedCache, QuantizedCacheConfig, QuantoQuantizedCache, SinkCache, SlidingWindowCache, StaticCache
    from .data.datasets import GlueDataset, GlueDataTrainingArguments, LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, SquadDataset, SquadDataTrainingArguments, TextDataset, TextDatasetForNextSentencePrediction
    from .generation import AlternatingCodebooksLogitsProcessor, BeamScorer, BeamSearchScorer, ClassifierFreeGuidanceLogitsProcessor, ConstrainedBeamSearchScorer, Constraint, ConstraintListState, DisjunctiveConstraint, EncoderNoRepeatNGramLogitsProcessor, EncoderRepetitionPenaltyLogitsProcessor, EosTokenCriteria, EpsilonLogitsWarper, EtaLogitsWarper, ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, GenerationMixin, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitNormalization, LogitsProcessor, LogitsProcessorList, LogitsWarper, MaxLengthCriteria, MaxTimeCriteria, MinLengthLogitsProcessor, MinNewTokensLengthLogitsProcessor, MinPLogitsWarper,
NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PhrasalConstraint, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, SequenceBiasLogitsProcessor, StoppingCriteria, StoppingCriteriaList, StopStringCriteria, SuppressTokensAtBeginLogitsProcessor, SuppressTokensLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, UnbatchedClassifierFreeGuidanceLogitsProcessor, WatermarkDetector, WatermarkLogitsProcessor, WhisperTimeStampLogitsProcessor from .integrations.executorch import TorchExportableModuleWithStaticCache, convert_and_export_with_cache from .modeling_rope_utils import ROPE_INIT_FUNCTIONS from .modeling_utils import PreTrainedModel from .models.albert import AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert from .models.align import AlignModel, AlignPreTrainedModel, AlignTextModel, AlignVisionModel from .models.altclip import AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel from .models.audio_spectrogram_transformer import ASTForAudioClassification, ASTModel, ASTPreTrainedModel from .models.auto import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING, MODEL_FOR_AUDIO_XVECTOR_MAPPING, MODEL_FOR_BACKBONE_MAPPING, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_CTC_MAPPING, MODEL_FOR_DEPTH_ESTIMATION_MAPPING, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_IMAGE_MAPPING, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, MODEL_FOR_KEYPOINT_DETECTION_MAPPING, MODEL_FOR_MASK_GENERATION_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, MODEL_FOR_TEXT_ENCODING_MAPPING, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoBackbone, AutoModel, AutoModelForAudioClassification, AutoModelForAudioFrameClassification, AutoModelForAudioXVector, AutoModelForCausalLM, AutoModelForCTC, AutoModelForDepthEstimation, AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForImageToImage, AutoModelForInstanceSegmentation, AutoModelForKeypointDetection, AutoModelForMaskedImageModeling, AutoModelForMaskedLM, AutoModelForMaskGeneration, AutoModelForMultipleChoice, AutoModelForNextSentencePrediction, AutoModelForObjectDetection, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSemanticSegmentation, AutoModelForSeq2SeqLM, 
AutoModelForSequenceClassification, AutoModelForSpeechSeq2Seq, AutoModelForTableQuestionAnswering, AutoModelForTextEncoding, AutoModelForTextToSpectrogram, AutoModelForTextToWaveform, AutoModelForTokenClassification, AutoModelForUniversalSegmentation, AutoModelForVideoClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, AutoModelForZeroShotImageClassification, AutoModelForZeroShotObjectDetection, AutoModelWithLMHead from .models.autoformer import AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel from .models.bark import BarkCausalModel, BarkCoarseModel, BarkFineModel, BarkModel, BarkPreTrainedModel, BarkSemanticModel from .models.bart import BartForCausalLM, BartForConditionalGeneration, BartForQuestionAnswering, BartForSequenceClassification, BartModel, BartPreTrainedModel, BartPretrainedModel, PretrainedBartModel from .models.beit import BeitBackbone, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel from .models.bert import BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert from .models.bert_generation import BertGenerationDecoder, BertGenerationEncoder, BertGenerationPreTrainedModel, load_tf_weights_in_bert_generation from .models.big_bird import BigBirdForCausalLM, BigBirdForMaskedLM, BigBirdForMultipleChoice, BigBirdForPreTraining, BigBirdForQuestionAnswering, BigBirdForSequenceClassification, BigBirdForTokenClassification, BigBirdModel, BigBirdPreTrainedModel, load_tf_weights_in_big_bird from .models.bigbird_pegasus import BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel from .models.biogpt import BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel from .models.bit import BitBackbone, BitForImageClassification, BitModel, BitPreTrainedModel from .models.blenderbot import BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel from .models.blenderbot_small import BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel from .models.blip import BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel from .models.blip_2 import Blip2ForConditionalGeneration, Blip2ForImageTextRetrieval, Blip2Model, Blip2PreTrainedModel, Blip2QFormerModel, Blip2TextModelWithProjection, Blip2VisionModel, Blip2VisionModelWithProjection from .models.bloom import BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel from .models.bridgetower import BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel from .models.bros import BrosForTokenClassification, BrosModel, BrosPreTrainedModel, BrosProcessor, BrosSpadeEEForTokenClassification, BrosSpadeELForTokenClassification from .models.camembert import CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, CamembertForQuestionAnswering, CamembertForSequenceClassification, 
CamembertForTokenClassification, CamembertModel, CamembertPreTrainedModel from .models.canine import CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine from .models.chameleon import ChameleonForConditionalGeneration, ChameleonModel, ChameleonPreTrainedModel, ChameleonProcessor, ChameleonVQVAE from .models.chinese_clip import ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel from .models.clap import ClapAudioModel, ClapAudioModelWithProjection, ClapFeatureExtractor, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection from .models.clip import CLIPForImageClassification, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection from .models.clipseg import CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel from .models.clvp import ClvpDecoder, ClvpEncoder, ClvpForCausalLM, ClvpModel, ClvpModelForConditionalGeneration, ClvpPreTrainedModel from .models.codegen import CodeGenForCausalLM, CodeGenModel, CodeGenPreTrainedModel from .models.cohere import CohereForCausalLM, CohereModel, CoherePreTrainedModel from .models.conditional_detr import ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel from .models.convbert import ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert from .models.convnext import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel from .models.convnextv2 import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model, ConvNextV2PreTrainedModel from .models.cpmant import CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel from .models.ctrl import CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel from .models.cvt import CvtForImageClassification, CvtModel, CvtPreTrainedModel from .models.dac import DacModel, DacPreTrainedModel from .models.data2vec import Data2VecAudioForAudioFrameClassification, Data2VecAudioForCTC, Data2VecAudioForSequenceClassification, Data2VecAudioForXVector, Data2VecAudioModel, Data2VecAudioPreTrainedModel, Data2VecTextForCausalLM, Data2VecTextForMaskedLM, Data2VecTextForMultipleChoice, Data2VecTextForQuestionAnswering, Data2VecTextForSequenceClassification, Data2VecTextForTokenClassification, Data2VecTextModel, Data2VecTextPreTrainedModel, Data2VecVisionForImageClassification, Data2VecVisionForSemanticSegmentation, Data2VecVisionModel, Data2VecVisionPreTrainedModel from .models.dbrx import DbrxForCausalLM, DbrxModel, DbrxPreTrainedModel from .models.deberta import DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel from .models.deberta_v2 import DebertaV2ForMaskedLM, DebertaV2ForMultipleChoice, DebertaV2ForQuestionAnswering, DebertaV2ForSequenceClassification, DebertaV2ForTokenClassification, DebertaV2Model, DebertaV2PreTrainedModel from .models.decision_transformer import DecisionTransformerGPT2Model, DecisionTransformerGPT2PreTrainedModel, DecisionTransformerModel, DecisionTransformerPreTrainedModel from .models.deformable_detr import 
DeformableDetrForObjectDetection, DeformableDetrModel, DeformableDetrPreTrainedModel from .models.deit import DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel from .models.deprecated.deta import DetaForObjectDetection, DetaModel, DetaPreTrainedModel from .models.deprecated.efficientformer import EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel from .models.deprecated.ernie_m import ErnieMForInformationExtraction, ErnieMForMultipleChoice, ErnieMForQuestionAnswering, ErnieMForSequenceClassification, ErnieMForTokenClassification, ErnieMModel, ErnieMPreTrainedModel from .models.deprecated.gptsan_japanese import GPTSanJapaneseForConditionalGeneration, GPTSanJapaneseModel, GPTSanJapanesePreTrainedModel from .models.deprecated.graphormer import GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel from .models.deprecated.jukebox import JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE from .models.deprecated.mctct import MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel from .models.deprecated.mega import MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel from .models.deprecated.mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings from .models.deprecated.nat import NatBackbone, NatForImageClassification, NatModel, NatPreTrainedModel from .models.deprecated.nezha import NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel from .models.deprecated.open_llama import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel, OpenLlamaPreTrainedModel from .models.deprecated.qdqbert import QDQBertForMaskedLM, QDQBertForMultipleChoice, QDQBertForNextSentencePrediction, QDQBertForQuestionAnswering, QDQBertForSequenceClassification, QDQBertForTokenClassification, QDQBertLMHeadModel, QDQBertModel, QDQBertPreTrainedModel, load_tf_weights_in_qdqbert from .models.deprecated.realm import RealmEmbedder, RealmForOpenQA, RealmKnowledgeAugEncoder, RealmPreTrainedModel, RealmReader, RealmRetriever, RealmScorer, load_tf_weights_in_realm from .models.deprecated.retribert import RetriBertModel, RetriBertPreTrainedModel from .models.deprecated.speech_to_text_2 import Speech2Text2ForCausalLM, Speech2Text2PreTrainedModel from .models.deprecated.trajectory_transformer import TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel from .models.deprecated.transfo_xl import AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl from .models.deprecated.tvlt import TvltForAudioVisualClassification, TvltForPreTraining, TvltModel, TvltPreTrainedModel from .models.deprecated.van import VanForImageClassification, VanModel, VanPreTrainedModel from .models.deprecated.vit_hybrid import ViTHybridForImageClassification, ViTHybridModel, ViTHybridPreTrainedModel from .models.deprecated.xlm_prophetnet import XLMProphetNetDecoder, XLMProphetNetEncoder, XLMProphetNetForCausalLM, XLMProphetNetForConditionalGeneration, XLMProphetNetModel, XLMProphetNetPreTrainedModel from .models.depth_anything import DepthAnythingForDepthEstimation, 
DepthAnythingPreTrainedModel from .models.detr import DetrForObjectDetection, DetrForSegmentation, DetrModel, DetrPreTrainedModel from .models.dinat import DinatBackbone, DinatForImageClassification, DinatModel, DinatPreTrainedModel from .models.dinov2 import Dinov2Backbone, Dinov2ForImageClassification, Dinov2Model, Dinov2PreTrainedModel from .models.distilbert import DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel from .models.donut import DonutSwinModel, DonutSwinPreTrainedModel from .models.dpr import DPRContextEncoder, DPRPretrainedContextEncoder, DPRPreTrainedModel, DPRPretrainedQuestionEncoder, DPRPretrainedReader, DPRQuestionEncoder, DPRReader from .models.dpt import DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel from .models.efficientnet import EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel from .models.electra import ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra from .models.encodec import EncodecModel, EncodecPreTrainedModel from .models.encoder_decoder import EncoderDecoderModel from .models.ernie import ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel from .models.esm import EsmFoldPreTrainedModel, EsmForMaskedLM, EsmForProteinFolding, EsmForSequenceClassification, EsmForTokenClassification, EsmModel, EsmPreTrainedModel from .models.falcon import FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel from .models.falcon_mamba import FalconMambaForCausalLM, FalconMambaModel, FalconMambaPreTrainedModel from .models.fastspeech2_conformer import FastSpeech2ConformerHifiGan, FastSpeech2ConformerModel, FastSpeech2ConformerPreTrainedModel, FastSpeech2ConformerWithHifiGan from .models.flaubert import FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertPreTrainedModel, FlaubertWithLMHeadModel from .models.flava import FlavaForPreTraining, FlavaImageCodebook, FlavaImageModel, FlavaModel, FlavaMultimodalModel, FlavaPreTrainedModel, FlavaTextModel from .models.fnet import FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetModel, FNetPreTrainedModel from .models.focalnet import FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel from .models.fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel from .models.funnel import FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel from .models.fuyu import FuyuForCausalLM, FuyuPreTrainedModel from .models.gemma import GemmaForCausalLM, 
GemmaForSequenceClassification, GemmaForTokenClassification, GemmaModel, GemmaPreTrainedModel from .models.gemma2 import Gemma2ForCausalLM, Gemma2ForSequenceClassification, Gemma2ForTokenClassification, Gemma2Model, Gemma2PreTrainedModel from .models.git import GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel from .models.glpn import GLPNForDepthEstimation, GLPNModel, GLPNPreTrainedModel from .models.gpt2 import GPT2DoubleHeadsModel, GPT2ForQuestionAnswering, GPT2ForSequenceClassification, GPT2ForTokenClassification, GPT2LMHeadModel, GPT2Model, GPT2PreTrainedModel, load_tf_weights_in_gpt2 from .models.gpt_bigcode import GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel from .models.gpt_neo import GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo from .models.gpt_neox import GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, GPTNeoXPreTrainedModel from .models.gpt_neox_japanese import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel from .models.gptj import GPTJForCausalLM, GPTJForQuestionAnswering, GPTJForSequenceClassification, GPTJModel, GPTJPreTrainedModel from .models.granite import GraniteForCausalLM, GraniteModel, GranitePreTrainedModel from .models.grounding_dino import GroundingDinoForObjectDetection, GroundingDinoModel, GroundingDinoPreTrainedModel from .models.groupvit import GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel from .models.hiera import HieraBackbone, HieraForImageClassification, HieraForPreTraining, HieraModel, HieraPreTrainedModel from .models.hubert import HubertForCTC, HubertForSequenceClassification, HubertModel, HubertPreTrainedModel from .models.ibert import IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel from .models.idefics import IdeficsForVisionText2Text, IdeficsModel, IdeficsPreTrainedModel, IdeficsProcessor from .models.idefics2 import Idefics2ForConditionalGeneration, Idefics2Model, Idefics2PreTrainedModel, Idefics2Processor from .models.imagegpt import ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel, ImageGPTPreTrainedModel, load_tf_weights_in_imagegpt from .models.informer import InformerForPrediction, InformerModel, InformerPreTrainedModel from .models.instructblip import InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel from .models.instructblipvideo import InstructBlipVideoForConditionalGeneration, InstructBlipVideoPreTrainedModel, InstructBlipVideoQFormerModel, InstructBlipVideoVisionModel from .models.jamba import JambaForCausalLM, JambaForSequenceClassification, JambaModel, JambaPreTrainedModel from .models.jetmoe import JetMoeForCausalLM, JetMoeForSequenceClassification, JetMoeModel, JetMoePreTrainedModel from .models.kosmos2 import Kosmos2ForConditionalGeneration, Kosmos2Model, Kosmos2PreTrainedModel from .models.layoutlm import LayoutLMForMaskedLM, LayoutLMForQuestionAnswering, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, LayoutLMPreTrainedModel from .models.layoutlmv2 import LayoutLMv2ForQuestionAnswering, LayoutLMv2ForSequenceClassification, 
LayoutLMv2ForTokenClassification, LayoutLMv2Model, LayoutLMv2PreTrainedModel from .models.layoutlmv3 import LayoutLMv3ForQuestionAnswering, LayoutLMv3ForSequenceClassification, LayoutLMv3ForTokenClassification, LayoutLMv3Model, LayoutLMv3PreTrainedModel from .models.led import LEDForConditionalGeneration, LEDForQuestionAnswering, LEDForSequenceClassification, LEDModel, LEDPreTrainedModel from .models.levit import LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, LevitPreTrainedModel from .models.lilt import LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel from .models.llama import LlamaForCausalLM, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaPreTrainedModel from .models.llava import LlavaForConditionalGeneration, LlavaPreTrainedModel from .models.llava_next import LlavaNextForConditionalGeneration, LlavaNextPreTrainedModel from .models.llava_next_video import LlavaNextVideoForConditionalGeneration, LlavaNextVideoPreTrainedModel from .models.llava_onevision import LlavaOnevisionForConditionalGeneration, LlavaOnevisionPreTrainedModel from .models.longformer import LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel from .models.longt5 import LongT5EncoderModel, LongT5ForConditionalGeneration, LongT5Model, LongT5PreTrainedModel from .models.luke import LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel from .models.lxmert import LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder from .models.m2m_100 import M2M100ForConditionalGeneration, M2M100Model, M2M100PreTrainedModel from .models.mamba import MambaForCausalLM, MambaModel, MambaPreTrainedModel from .models.mamba2 import Mamba2ForCausalLM, Mamba2Model, Mamba2PreTrainedModel from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel, MarianPreTrainedModel from .models.markuplm import MarkupLMForQuestionAnswering, MarkupLMForSequenceClassification, MarkupLMForTokenClassification, MarkupLMModel, MarkupLMPreTrainedModel from .models.mask2former import Mask2FormerForUniversalSegmentation, Mask2FormerModel, Mask2FormerPreTrainedModel from .models.maskformer import MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, MaskFormerSwinBackbone from .models.mbart import MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel from .models.megatron_bert import MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel from .models.mgp_str import MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel from .models.mistral import MistralForCausalLM, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralPreTrainedModel from .models.mixtral import MixtralForCausalLM, 
MixtralForSequenceClassification, MixtralForTokenClassification, MixtralModel, MixtralPreTrainedModel from .models.mobilebert import MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert from .models.mobilenet_v1 import MobileNetV1ForImageClassification, MobileNetV1Model, MobileNetV1PreTrainedModel, load_tf_weights_in_mobilenet_v1 from .models.mobilenet_v2 import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model, MobileNetV2PreTrainedModel, load_tf_weights_in_mobilenet_v2 from .models.mobilevit import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel from .models.mobilevitv2 import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model, MobileViTV2PreTrainedModel from .models.mpnet import MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, MPNetPreTrainedModel from .models.mpt import MptForCausalLM, MptForQuestionAnswering, MptForSequenceClassification, MptForTokenClassification, MptModel, MptPreTrainedModel from .models.mra import MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, MraPreTrainedModel from .models.mt5 import MT5EncoderModel, MT5ForConditionalGeneration, MT5ForQuestionAnswering, MT5ForSequenceClassification, MT5ForTokenClassification, MT5Model, MT5PreTrainedModel from .models.musicgen import MusicgenForCausalLM, MusicgenForConditionalGeneration, MusicgenModel, MusicgenPreTrainedModel, MusicgenProcessor from .models.musicgen_melody import MusicgenMelodyForCausalLM, MusicgenMelodyForConditionalGeneration, MusicgenMelodyModel, MusicgenMelodyPreTrainedModel from .models.mvp import MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel from .models.nemotron import NemotronForCausalLM, NemotronForQuestionAnswering, NemotronForSequenceClassification, NemotronForTokenClassification, NemotronModel, NemotronPreTrainedModel from .models.nllb_moe import NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTop2Router from .models.nystromformer import NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, NystromformerPreTrainedModel from .models.olmo import OlmoForCausalLM, OlmoModel, OlmoPreTrainedModel from .models.olmoe import OlmoeForCausalLM, OlmoeModel, OlmoePreTrainedModel from .models.oneformer import OneFormerForUniversalSegmentation, OneFormerModel, OneFormerPreTrainedModel from .models.openai import OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, OpenAIGPTPreTrainedModel, load_tf_weights_in_openai_gpt from .models.opt import OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel from .models.owlv2 import Owlv2ForObjectDetection, Owlv2Model, Owlv2PreTrainedModel, Owlv2TextModel, Owlv2VisionModel from .models.owlvit import OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, 
OwlViTVisionModel from .models.paligemma import PaliGemmaForConditionalGeneration, PaliGemmaPreTrainedModel, PaliGemmaProcessor from .models.patchtsmixer import PatchTSMixerForPrediction, PatchTSMixerForPretraining, PatchTSMixerForRegression, PatchTSMixerForTimeSeriesClassification, PatchTSMixerModel, PatchTSMixerPreTrainedModel from .models.patchtst import PatchTSTForClassification, PatchTSTForPrediction, PatchTSTForPretraining, PatchTSTForRegression, PatchTSTModel, PatchTSTPreTrainedModel from .models.pegasus import PegasusForCausalLM, PegasusForConditionalGeneration, PegasusModel, PegasusPreTrainedModel from .models.pegasus_x import PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel from .models.perceiver import PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverModel, PerceiverPreTrainedModel from .models.persimmon import PersimmonForCausalLM, PersimmonForSequenceClassification, PersimmonForTokenClassification, PersimmonModel, PersimmonPreTrainedModel from .models.phi import PhiForCausalLM, PhiForSequenceClassification, PhiForTokenClassification, PhiModel, PhiPreTrainedModel from .models.phi3 import Phi3ForCausalLM, Phi3ForSequenceClassification, Phi3ForTokenClassification, Phi3Model, Phi3PreTrainedModel from .models.pix2struct import Pix2StructForConditionalGeneration, Pix2StructPreTrainedModel, Pix2StructTextModel, Pix2StructVisionModel from .models.pixtral import PixtralModel, PixtralPreTrainedModel from .models.plbart import PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel from .models.poolformer import PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel from .models.pop2piano import Pop2PianoForConditionalGeneration, Pop2PianoPreTrainedModel from .models.prophetnet import ProphetNetDecoder, ProphetNetEncoder, ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel, ProphetNetPreTrainedModel from .models.pvt import PvtForImageClassification, PvtModel, PvtPreTrainedModel from .models.pvt_v2 import PvtV2Backbone, PvtV2ForImageClassification, PvtV2Model, PvtV2PreTrainedModel from .models.qwen2 import Qwen2ForCausalLM, Qwen2ForSequenceClassification, Qwen2ForTokenClassification, Qwen2Model, Qwen2PreTrainedModel from .models.qwen2_audio import Qwen2AudioEncoder, Qwen2AudioForConditionalGeneration, Qwen2AudioPreTrainedModel from .models.qwen2_moe import Qwen2MoeForCausalLM, Qwen2MoeForSequenceClassification, Qwen2MoeForTokenClassification, Qwen2MoeModel, Qwen2MoePreTrainedModel from .models.qwen2_vl import Qwen2VLForConditionalGeneration, Qwen2VLModel, Qwen2VLPreTrainedModel from .models.rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration from .models.recurrent_gemma import RecurrentGemmaForCausalLM, RecurrentGemmaModel, RecurrentGemmaPreTrainedModel from .models.reformer import ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel from .models.regnet import RegNetForImageClassification, RegNetModel, RegNetPreTrainedModel from .models.rembert import RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertModel, 
RemBertPreTrainedModel, load_tf_weights_in_rembert from .models.resnet import ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel from .models.roberta import RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel from .models.roberta_prelayernorm import RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel from .models.roc_bert import RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert from .models.roformer import RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer from .models.rt_detr import RTDetrForObjectDetection, RTDetrModel, RTDetrPreTrainedModel, RTDetrResNetBackbone, RTDetrResNetPreTrainedModel from .models.rwkv import RwkvForCausalLM, RwkvModel, RwkvPreTrainedModel from .models.sam import SamModel, SamPreTrainedModel from .models.seamless_m4t import SeamlessM4TCodeHifiGan, SeamlessM4TForSpeechToSpeech, SeamlessM4TForSpeechToText, SeamlessM4TForTextToSpeech, SeamlessM4TForTextToText, SeamlessM4THifiGan, SeamlessM4TModel, SeamlessM4TPreTrainedModel, SeamlessM4TTextToUnitForConditionalGeneration, SeamlessM4TTextToUnitModel from .models.seamless_m4t_v2 import SeamlessM4Tv2ForSpeechToSpeech, SeamlessM4Tv2ForSpeechToText, SeamlessM4Tv2ForTextToSpeech, SeamlessM4Tv2ForTextToText, SeamlessM4Tv2Model, SeamlessM4Tv2PreTrainedModel from .models.segformer import SegformerDecodeHead, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, SegformerPreTrainedModel from .models.seggpt import SegGptForImageSegmentation, SegGptModel, SegGptPreTrainedModel from .models.sew import SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel from .models.sew_d import SEWDForCTC, SEWDForSequenceClassification, SEWDModel, SEWDPreTrainedModel from .models.siglip import SiglipForImageClassification, SiglipModel, SiglipPreTrainedModel, SiglipTextModel, SiglipVisionModel from .models.speech_encoder_decoder import SpeechEncoderDecoderModel from .models.speech_to_text import Speech2TextForConditionalGeneration, Speech2TextModel, Speech2TextPreTrainedModel from .models.speecht5 import SpeechT5ForSpeechToSpeech, SpeechT5ForSpeechToText, SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Model, SpeechT5PreTrainedModel from .models.splinter import SplinterForPreTraining, SplinterForQuestionAnswering, SplinterModel, SplinterPreTrainedModel from .models.squeezebert import SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertPreTrainedModel from .models.stablelm import StableLmForCausalLM, StableLmForSequenceClassification, StableLmForTokenClassification, StableLmModel, StableLmPreTrainedModel from .models.starcoder2 import Starcoder2ForCausalLM, 
Starcoder2ForSequenceClassification, Starcoder2ForTokenClassification, Starcoder2Model, Starcoder2PreTrainedModel from .models.superpoint import SuperPointForKeypointDetection, SuperPointPreTrainedModel from .models.swiftformer import SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel from .models.swin import SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel from .models.swin2sr import Swin2SRForImageSuperResolution, Swin2SRModel, Swin2SRPreTrainedModel from .models.swinv2 import Swinv2Backbone, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model, Swinv2PreTrainedModel from .models.switch_transformers import SwitchTransformersEncoderModel, SwitchTransformersForConditionalGeneration, SwitchTransformersModel, SwitchTransformersPreTrainedModel, SwitchTransformersSparseMLP, SwitchTransformersTop1Router from .models.t5 import T5EncoderModel, T5ForConditionalGeneration, T5ForQuestionAnswering, T5ForSequenceClassification, T5ForTokenClassification, T5Model, T5PreTrainedModel, load_tf_weights_in_t5 from .models.table_transformer import TableTransformerForObjectDetection, TableTransformerModel, TableTransformerPreTrainedModel from .models.tapas import TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas from .models.time_series_transformer import TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel from .models.timesformer import TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel from .models.timm_backbone import TimmBackbone from .models.trocr import TrOCRForCausalLM, TrOCRPreTrainedModel from .models.tvp import TvpForVideoGrounding, TvpModel, TvpPreTrainedModel from .models.udop import UdopEncoderModel, UdopForConditionalGeneration, UdopModel, UdopPreTrainedModel from .models.umt5 import UMT5EncoderModel, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5ForSequenceClassification, UMT5ForTokenClassification, UMT5Model, UMT5PreTrainedModel from .models.unispeech import UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel from .models.unispeech_sat import UniSpeechSatForAudioFrameClassification, UniSpeechSatForCTC, UniSpeechSatForPreTraining, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, UniSpeechSatModel, UniSpeechSatPreTrainedModel from .models.univnet import UnivNetModel from .models.upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel from .models.video_llava import VideoLlavaForConditionalGeneration, VideoLlavaPreTrainedModel, VideoLlavaProcessor from .models.videomae import VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, VideoMAEPreTrainedModel from .models.vilt import ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltForTokenClassification, ViltModel, ViltPreTrainedModel from .models.vipllava import VipLlavaForConditionalGeneration, VipLlavaPreTrainedModel from .models.vision_encoder_decoder import VisionEncoderDecoderModel from .models.vision_text_dual_encoder import VisionTextDualEncoderModel from .models.visual_bert import VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForRegionToPhraseAlignment, VisualBertForVisualReasoning, VisualBertModel, VisualBertPreTrainedModel from .models.vit import 
ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel from .models.vit_mae import ViTMAEForPreTraining, ViTMAEModel, ViTMAEPreTrainedModel from .models.vit_msn import ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel from .models.vitdet import VitDetBackbone, VitDetModel, VitDetPreTrainedModel from .models.vitmatte import VitMatteForImageMatting, VitMattePreTrainedModel from .models.vits import VitsModel, VitsPreTrainedModel from .models.vivit import VivitForVideoClassification, VivitModel, VivitPreTrainedModel from .models.wav2vec2 import Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForCTC, Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2ForSequenceClassification, Wav2Vec2ForXVector, Wav2Vec2Model, Wav2Vec2PreTrainedModel from .models.wav2vec2_bert import Wav2Vec2BertForAudioFrameClassification, Wav2Vec2BertForCTC, Wav2Vec2BertForSequenceClassification, Wav2Vec2BertForXVector, Wav2Vec2BertModel, Wav2Vec2BertPreTrainedModel from .models.wav2vec2_conformer import Wav2Vec2ConformerForAudioFrameClassification, Wav2Vec2ConformerForCTC, Wav2Vec2ConformerForPreTraining, Wav2Vec2ConformerForSequenceClassification, Wav2Vec2ConformerForXVector, Wav2Vec2ConformerModel, Wav2Vec2ConformerPreTrainedModel from .models.wavlm import WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel from .models.whisper import WhisperForAudioClassification, WhisperForCausalLM, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel from .models.x_clip import XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel from .models.xglm import XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel from .models.xlm import XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel from .models.xlm_roberta import XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel from .models.xlm_roberta_xl import XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel from .models.xlnet import XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet from .models.xmod import XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel from .models.yolos import YolosForObjectDetection, YolosModel, YolosPreTrainedModel from .models.yoso import YosoForMaskedLM, YosoForMultipleChoice, YosoForQuestionAnswering, YosoForSequenceClassification, YosoForTokenClassification, YosoModel, YosoPreTrainedModel from .models.zoedepth import ZoeDepthForDepthEstimation, ZoeDepthPreTrainedModel from .optimization import Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, 
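# A minimal usage sketch for the schedule helpers imported from .optimization around
# this point (illustrative only; the toy model, learning rate and step counts are
# assumptions, and PyTorch must be installed):
import torch
from transformers import get_linear_schedule_with_warmup

toy_model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(toy_model.parameters(), lr=5e-5)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(100):
    loss = toy_model(torch.randn(4, 10)).sum()
    loss.backward()        # populate gradients
    optimizer.step()       # update parameters
    scheduler.step()       # advance the warmup/linear-decay schedule
    optimizer.zero_grad()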
get_scheduler, get_wsd_schedule from .pytorch_utils import Conv1D, apply_chunking_to_forward, prune_layer from .trainer import Trainer from .trainer_pt_utils import torch_distributed_zero_first from .trainer_seq2seq import Seq2SeqTrainer try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_tf_objects import * else: from .benchmark.benchmark_args_tf import TensorFlowBenchmarkArguments from .benchmark.benchmark_tf import TensorFlowBenchmark from .generation import TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFGenerationMixin, TFLogitsProcessor, TFLogitsProcessorList, TFLogitsWarper, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper from .keras_callbacks import KerasMetricCallback, PushToHubCallback from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, shape_list from .models.albert import TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel from .models.auto import TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_TEXT_ENCODING_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING, TFAutoModel, TFAutoModelForAudioClassification, TFAutoModelForCausalLM, TFAutoModelForDocumentQuestionAnswering, TFAutoModelForImageClassification, TFAutoModelForMaskedImageModeling, TFAutoModelForMaskedLM, TFAutoModelForMaskGeneration, TFAutoModelForMultipleChoice, TFAutoModelForNextSentencePrediction, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSemanticSegmentation, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForSpeechSeq2Seq, TFAutoModelForTableQuestionAnswering, TFAutoModelForTextEncoding, TFAutoModelForTokenClassification, TFAutoModelForVision2Seq, TFAutoModelForZeroShotImageClassification, TFAutoModelWithLMHead from .models.bart import TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBartModel, TFBartPretrainedModel from .models.bert import TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel from .models.blenderbot import TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel from .models.blenderbot_small import 
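# A minimal usage sketch for the TF auto classes imported above (illustrative only;
# assumes TensorFlow is installed and uses the public `bert-base-uncased` checkpoint
# purely as an example):
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
tf_model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
batch = tokenizer("Benchmarks are fun.", return_tensors="tf")
logits = tf_model(**batch).logits      # shape (1, 2): one score per label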
TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel from .models.blip import TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel from .models.camembert import TFCamembertForCausalLM, TFCamembertForMaskedLM, TFCamembertForMultipleChoice, TFCamembertForQuestionAnswering, TFCamembertForSequenceClassification, TFCamembertForTokenClassification, TFCamembertModel, TFCamembertPreTrainedModel from .models.clip import TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel from .models.convbert import TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, TFConvBertPreTrainedModel from .models.convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel from .models.convnextv2 import TFConvNextV2ForImageClassification, TFConvNextV2Model, TFConvNextV2PreTrainedModel from .models.ctrl import TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel from .models.cvt import TFCvtForImageClassification, TFCvtModel, TFCvtPreTrainedModel from .models.data2vec import TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation, TFData2VecVisionModel, TFData2VecVisionPreTrainedModel from .models.deberta import TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel from .models.deberta_v2 import TFDebertaV2ForMaskedLM, TFDebertaV2ForMultipleChoice, TFDebertaV2ForQuestionAnswering, TFDebertaV2ForSequenceClassification, TFDebertaV2ForTokenClassification, TFDebertaV2Model, TFDebertaV2PreTrainedModel from .models.deit import TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel from .models.deprecated.efficientformer import TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel from .models.deprecated.transfo_xl import TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel from .models.distilbert import TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel from .models.dpr import TFDPRContextEncoder, TFDPRPretrainedContextEncoder, TFDPRPretrainedQuestionEncoder, TFDPRPretrainedReader, TFDPRQuestionEncoder, TFDPRReader from .models.electra import TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel from .models.encoder_decoder import TFEncoderDecoderModel from .models.esm import TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, TFEsmPreTrainedModel from .models.flaubert import TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertPreTrainedModel, TFFlaubertWithLMHeadModel from .models.funnel 
import TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel from .models.gpt2 import TFGPT2DoubleHeadsModel, TFGPT2ForSequenceClassification, TFGPT2LMHeadModel, TFGPT2MainLayer, TFGPT2Model, TFGPT2PreTrainedModel from .models.gptj import TFGPTJForCausalLM, TFGPTJForQuestionAnswering, TFGPTJForSequenceClassification, TFGPTJModel, TFGPTJPreTrainedModel from .models.groupvit import TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel from .models.hubert import TFHubertForCTC, TFHubertModel, TFHubertPreTrainedModel from .models.idefics import TFIdeficsForVisionText2Text, TFIdeficsModel, TFIdeficsPreTrainedModel from .models.layoutlm import TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMMainLayer, TFLayoutLMModel, TFLayoutLMPreTrainedModel from .models.layoutlmv3 import TFLayoutLMv3ForQuestionAnswering, TFLayoutLMv3ForSequenceClassification, TFLayoutLMv3ForTokenClassification, TFLayoutLMv3Model, TFLayoutLMv3PreTrainedModel from .models.led import TFLEDForConditionalGeneration, TFLEDModel, TFLEDPreTrainedModel from .models.longformer import TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel from .models.lxmert import TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder from .models.marian import TFMarianModel, TFMarianMTModel, TFMarianPreTrainedModel from .models.mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel from .models.mistral import TFMistralForCausalLM, TFMistralForSequenceClassification, TFMistralModel, TFMistralPreTrainedModel from .models.mobilebert import TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel from .models.mobilevit import TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel from .models.mpnet import TFMPNetForMaskedLM, TFMPNetForMultipleChoice, TFMPNetForQuestionAnswering, TFMPNetForSequenceClassification, TFMPNetForTokenClassification, TFMPNetMainLayer, TFMPNetModel, TFMPNetPreTrainedModel from .models.mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model from .models.openai import TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification, TFOpenAIGPTLMHeadModel, TFOpenAIGPTMainLayer, TFOpenAIGPTModel, TFOpenAIGPTPreTrainedModel from .models.opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel from .models.pegasus import TFPegasusForConditionalGeneration, TFPegasusModel, TFPegasusPreTrainedModel from .models.rag import TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration from .models.regnet import TFRegNetForImageClassification, TFRegNetModel, TFRegNetPreTrainedModel from .models.rembert import TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, 
TFRemBertForTokenClassification, TFRemBertModel, TFRemBertPreTrainedModel from .models.resnet import TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel from .models.roberta import TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel from .models.roberta_prelayernorm import TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel from .models.roformer import TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, TFRoFormerPreTrainedModel from .models.sam import TFSamModel, TFSamPreTrainedModel from .models.segformer import TFSegformerDecodeHead, TFSegformerForImageClassification, TFSegformerForSemanticSegmentation, TFSegformerModel, TFSegformerPreTrainedModel from .models.speech_to_text import TFSpeech2TextForConditionalGeneration, TFSpeech2TextModel, TFSpeech2TextPreTrainedModel from .models.swiftformer import TFSwiftFormerForImageClassification, TFSwiftFormerModel, TFSwiftFormerPreTrainedModel from .models.swin import TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel from .models.t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model, TFT5PreTrainedModel from .models.tapas import TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel from .models.vision_encoder_decoder import TFVisionEncoderDecoderModel from .models.vision_text_dual_encoder import TFVisionTextDualEncoderModel from .models.vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel from .models.vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel from .models.wav2vec2 import TFWav2Vec2ForCTC, TFWav2Vec2ForSequenceClassification, TFWav2Vec2Model, TFWav2Vec2PreTrainedModel from .models.whisper import TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel from .models.xglm import TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel from .models.xlm import TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel from .models.xlm_roberta import TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel from .models.xlnet import TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel from .optimization_tf import AdamWeightDecay, GradientAccumulator, WarmUp, create_optimizer try: if not (is_librosa_available() and is_essentia_available() and is_scipy_available() and is_torch_available() and is_pretty_midi_available()): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: from .utils.dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects import * else: from .models.pop2piano import Pop2PianoFeatureExtractor, Pop2PianoProcessor, Pop2PianoTokenizer try: if not is_torchaudio_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torchaudio_objects import * else: from .models.musicgen_melody import MusicgenMelodyFeatureExtractor, MusicgenMelodyProcessor try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * else: from .generation import FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxForceTokensLogitsProcessor, FlaxGenerationMixin, FlaxLogitsProcessor, FlaxLogitsProcessorList, FlaxLogitsWarper, FlaxMinLengthLogitsProcessor, FlaxSuppressTokensAtBeginLogitsProcessor, FlaxSuppressTokensLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, FlaxWhisperTimeStampLogitsProcessor from .modeling_flax_utils import FlaxPreTrainedModel from .models.albert import FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel from .models.auto import FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, FLAX_MODEL_FOR_MASKED_LM_MAPPING, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, FLAX_MODEL_FOR_PRETRAINING_MAPPING, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, FLAX_MODEL_MAPPING, FlaxAutoModel, FlaxAutoModelForCausalLM, FlaxAutoModelForImageClassification, FlaxAutoModelForMaskedLM, FlaxAutoModelForMultipleChoice, FlaxAutoModelForNextSentencePrediction, FlaxAutoModelForPreTraining, FlaxAutoModelForQuestionAnswering, FlaxAutoModelForSeq2SeqLM, FlaxAutoModelForSequenceClassification, FlaxAutoModelForSpeechSeq2Seq, FlaxAutoModelForTokenClassification, FlaxAutoModelForVision2Seq from .models.bart import FlaxBartDecoderPreTrainedModel, FlaxBartForCausalLM, FlaxBartForConditionalGeneration, FlaxBartForQuestionAnswering, FlaxBartForSequenceClassification, FlaxBartModel, FlaxBartPreTrainedModel from .models.beit import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel from .models.bert import FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel from .models.big_bird import FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, FlaxBigBirdPreTrainedModel from .models.blenderbot import FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel from .models.blenderbot_small import FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel from 
.models.bloom import FlaxBloomForCausalLM, FlaxBloomModel, FlaxBloomPreTrainedModel from .models.clip import FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextModelWithProjection, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel from .models.dinov2 import FlaxDinov2ForImageClassification, FlaxDinov2Model, FlaxDinov2PreTrainedModel from .models.distilbert import FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel from .models.electra import FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel from .models.encoder_decoder import FlaxEncoderDecoderModel from .models.gemma import FlaxGemmaForCausalLM, FlaxGemmaModel, FlaxGemmaPreTrainedModel from .models.gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model, FlaxGPT2PreTrainedModel from .models.gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel from .models.gptj import FlaxGPTJForCausalLM, FlaxGPTJModel, FlaxGPTJPreTrainedModel from .models.llama import FlaxLlamaForCausalLM, FlaxLlamaModel, FlaxLlamaPreTrainedModel from .models.longt5 import FlaxLongT5ForConditionalGeneration, FlaxLongT5Model, FlaxLongT5PreTrainedModel from .models.marian import FlaxMarianModel, FlaxMarianMTModel, FlaxMarianPreTrainedModel from .models.mbart import FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel from .models.mistral import FlaxMistralForCausalLM, FlaxMistralModel, FlaxMistralPreTrainedModel from .models.mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model from .models.opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel from .models.pegasus import FlaxPegasusForConditionalGeneration, FlaxPegasusModel, FlaxPegasusPreTrainedModel from .models.regnet import FlaxRegNetForImageClassification, FlaxRegNetModel, FlaxRegNetPreTrainedModel from .models.resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel from .models.roberta import FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel from .models.roberta_prelayernorm import FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel from .models.roformer import FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel from .models.speech_encoder_decoder import FlaxSpeechEncoderDecoderModel from .models.t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model, FlaxT5PreTrainedModel from .models.vision_encoder_decoder import FlaxVisionEncoderDecoderModel from .models.vision_text_dual_encoder import 
FlaxVisionTextDualEncoderModel from .models.vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel from .models.wav2vec2 import FlaxWav2Vec2ForCTC, FlaxWav2Vec2ForPreTraining, FlaxWav2Vec2Model, FlaxWav2Vec2PreTrainedModel from .models.whisper import FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel from .models.xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel from .models.xlm_roberta import FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__, extra_objects={'__version__': __version__}) if not is_tf_available() and (not is_torch_available()) and (not is_flax_available()): logger.warning_advice("None of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.") # File: transformers-main/src/transformers/activations.py import math from collections import OrderedDict import torch from packaging import version from torch import Tensor, nn from .utils import logging logger = logging.get_logger(__name__) class PytorchGELUTanh(nn.Module): def __init__(self): super().__init__() if version.parse(torch.__version__) < version.parse('1.12.0'): raise ImportError(f'You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use PytorchGELUTanh. Please upgrade torch.') def forward(self, input: Tensor) -> Tensor: return nn.functional.gelu(input, approximate='tanh') class NewGELUActivation(nn.Module): def forward(self, input: Tensor) -> Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0)))) class GELUActivation(nn.Module): def __init__(self, use_gelu_python: bool=False): super().__init__() if use_gelu_python: self.act = self._gelu_python else: self.act = nn.functional.gelu def _gelu_python(self, input: Tensor) -> Tensor: return input * 0.5 * (1.0 + torch.erf(input / math.sqrt(2.0))) def forward(self, input: Tensor) -> Tensor: return self.act(input) class FastGELUActivation(nn.Module): def forward(self, input: Tensor) -> Tensor: return 0.5 * input * (1.0 + torch.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * input))) class QuickGELUActivation(nn.Module): def forward(self, input: Tensor) -> Tensor: return input * torch.sigmoid(1.702 * input) class ClippedGELUActivation(nn.Module): def __init__(self, min: float, max: float): if min > max: raise ValueError(f'min should be < max (got min: {min}, max: {max})') super().__init__() self.min = min self.max = max def forward(self, x: Tensor) -> Tensor: return torch.clip(gelu(x), self.min, self.max) class AccurateGELUActivation(nn.Module): def __init__(self): super().__init__() self.precomputed_constant = math.sqrt(2 / math.pi) def forward(self, input: Tensor) -> Tensor: return 0.5 * input * (1 + torch.tanh(self.precomputed_constant * (input + 0.044715 * torch.pow(input, 3)))) class MishActivation(nn.Module): def __init__(self): super().__init__() if version.parse(torch.__version__) < version.parse('1.9.0'): self.act = self._mish_python else: self.act = nn.functional.mish def _mish_python(self, input: Tensor) -> Tensor: return input * 
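# A minimal usage sketch for the activation registry of this module (ACT2FN and
# get_activation are defined just below); illustrative only, assumes PyTorch:
import torch
from transformers.activations import ACT2FN, get_activation

act = get_activation("gelu_new")      # an instantiated NewGELUActivation module
same = ACT2FN["gelu_new"]             # ClassInstantier builds a fresh instance per lookup
out = act(torch.randn(2, 3))          # apply like any nn.Module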
torch.tanh(nn.functional.softplus(input)) def forward(self, input: Tensor) -> Tensor: return self.act(input) class LinearActivation(nn.Module): def forward(self, input: Tensor) -> Tensor: return input class LaplaceActivation(nn.Module): def forward(self, input, mu=0.707107, sigma=0.282095): input = (input - mu).div(sigma * math.sqrt(2.0)) return 0.5 * (1.0 + torch.erf(input)) class ReLUSquaredActivation(nn.Module): def forward(self, input): relu_applied = nn.functional.relu(input) squared = torch.square(relu_applied) return squared class ClassInstantier(OrderedDict): def __getitem__(self, key): content = super().__getitem__(key) (cls, kwargs) = content if isinstance(content, tuple) else (content, {}) return cls(**kwargs) ACT2CLS = {'gelu': GELUActivation, 'gelu_10': (ClippedGELUActivation, {'min': -10, 'max': 10}), 'gelu_fast': FastGELUActivation, 'gelu_new': NewGELUActivation, 'gelu_python': (GELUActivation, {'use_gelu_python': True}), 'gelu_pytorch_tanh': PytorchGELUTanh, 'gelu_accurate': AccurateGELUActivation, 'laplace': LaplaceActivation, 'leaky_relu': nn.LeakyReLU, 'linear': LinearActivation, 'mish': MishActivation, 'quick_gelu': QuickGELUActivation, 'relu': nn.ReLU, 'relu2': ReLUSquaredActivation, 'relu6': nn.ReLU6, 'sigmoid': nn.Sigmoid, 'silu': nn.SiLU, 'swish': nn.SiLU, 'tanh': nn.Tanh} ACT2FN = ClassInstantier(ACT2CLS) def get_activation(activation_string): if activation_string in ACT2FN: return ACT2FN[activation_string] else: raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}') gelu_python = get_activation('gelu_python') gelu_new = get_activation('gelu_new') gelu = get_activation('gelu') gelu_fast = get_activation('gelu_fast') quick_gelu = get_activation('quick_gelu') silu = get_activation('silu') mish = get_activation('mish') linear_act = get_activation('linear') # File: transformers-main/src/transformers/activations_tf.py import math import tensorflow as tf from packaging.version import parse try: import tf_keras as keras except (ModuleNotFoundError, ImportError): import keras if parse(keras.__version__).major > 2: raise ValueError('Your currently installed version of Keras is Keras 3, but this is not yet supported in Transformers. 
Please install the backwards-compatible tf-keras package with `pip install tf-keras`.') def _gelu(x): x = tf.convert_to_tensor(x) cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype))) return x * cdf def _gelu_new(x): x = tf.convert_to_tensor(x) pi = tf.cast(math.pi, x.dtype) coeff = tf.cast(0.044715, x.dtype) cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3)))) return x * cdf def mish(x): x = tf.convert_to_tensor(x) return x * tf.tanh(tf.math.softplus(x)) def gelu_fast(x): x = tf.convert_to_tensor(x) coeff1 = tf.cast(0.044715, x.dtype) coeff2 = tf.cast(0.7978845608, x.dtype) return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x))) def quick_gelu(x): x = tf.convert_to_tensor(x) coeff = tf.cast(1.702, x.dtype) return x * tf.math.sigmoid(coeff * x) def gelu_10(x): return tf.clip_by_value(_gelu(x), -10, 10) def glu(x, axis=-1): (a, b) = tf.split(x, 2, axis=axis) return a * tf.math.sigmoid(b) if parse(tf.version.VERSION) >= parse('2.4'): def approximate_gelu_wrap(x): return keras.activations.gelu(x, approximate=True) gelu = keras.activations.gelu gelu_new = approximate_gelu_wrap else: gelu = _gelu gelu_new = _gelu_new ACT2FN = {'gelu': gelu, 'gelu_10': gelu_10, 'gelu_fast': gelu_fast, 'gelu_new': gelu_new, 'glu': glu, 'mish': mish, 'quick_gelu': quick_gelu, 'relu': keras.activations.relu, 'sigmoid': keras.activations.sigmoid, 'silu': keras.activations.swish, 'swish': keras.activations.swish, 'tanh': keras.activations.tanh} def get_tf_activation(activation_string): if activation_string in ACT2FN: return ACT2FN[activation_string] else: raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}') # File: transformers-main/src/transformers/agents/__init__.py from typing import TYPE_CHECKING from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'agents': ['Agent', 'CodeAgent', 'ManagedAgent', 'ReactAgent', 'ReactCodeAgent', 'ReactJsonAgent', 'Toolbox'], 'llm_engine': ['HfApiEngine', 'TransformersEngine'], 'monitoring': ['stream_to_gradio'], 'tools': ['PipelineTool', 'Tool', 'ToolCollection', 'launch_gradio_demo', 'load_tool']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['default_tools'] = ['FinalAnswerTool', 'PythonInterpreterTool'] _import_structure['document_question_answering'] = ['DocumentQuestionAnsweringTool'] _import_structure['image_question_answering'] = ['ImageQuestionAnsweringTool'] _import_structure['search'] = ['DuckDuckGoSearchTool', 'VisitWebpageTool'] _import_structure['speech_to_text'] = ['SpeechToTextTool'] _import_structure['text_to_speech'] = ['TextToSpeechTool'] _import_structure['translation'] = ['TranslationTool'] if TYPE_CHECKING: from .agents import Agent, CodeAgent, ManagedAgent, ReactAgent, ReactCodeAgent, ReactJsonAgent, Toolbox from .llm_engine import HfApiEngine, TransformersEngine from .monitoring import stream_to_gradio from .tools import PipelineTool, Tool, ToolCollection, launch_gradio_demo, load_tool try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .default_tools import FinalAnswerTool, PythonInterpreterTool from .document_question_answering import DocumentQuestionAnsweringTool from .image_question_answering import ImageQuestionAnsweringTool from .search import DuckDuckGoSearchTool, VisitWebpageTool from .speech_to_text import SpeechToTextTool from 
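# The agents subpackage assembled just above uses the same lazy-import scheme as the
# top-level __init__: public names are listed in _import_structure and resolved by
# _LazyModule on first attribute access. A minimal sketch (illustrative only; assumes
# the subpackage's optional dependencies are installed):
import transformers.agents as agents

Tool = agents.Tool        # attribute access triggers the real import of .tools
print(type(agents))       # the module object is a _LazyModule proxy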
.text_to_speech import TextToSpeechTool from .translation import TranslationTool else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/agents/agent_types.py import os import pathlib import tempfile import uuid import numpy as np from ..utils import is_soundfile_availble, is_torch_available, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): from PIL import Image from PIL.Image import Image as ImageType else: ImageType = object if is_torch_available(): import torch from torch import Tensor else: Tensor = object if is_soundfile_availble(): import soundfile as sf class AgentType: def __init__(self, value): self._value = value def __str__(self): return self.to_string() def to_raw(self): logger.error('This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable') return self._value def to_string(self) -> str: logger.error('This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable') return str(self._value) class AgentText(AgentType, str): def to_raw(self): return self._value def to_string(self): return str(self._value) class AgentImage(AgentType, ImageType): def __init__(self, value): AgentType.__init__(self, value) ImageType.__init__(self) if not is_vision_available(): raise ImportError('PIL must be installed in order to handle images.') self._path = None self._raw = None self._tensor = None if isinstance(value, ImageType): self._raw = value elif isinstance(value, (str, pathlib.Path)): self._path = value elif isinstance(value, torch.Tensor): self._tensor = value elif isinstance(value, np.ndarray): self._tensor = torch.from_numpy(value) else: raise TypeError(f'Unsupported type for {self.__class__.__name__}: {type(value)}') def _ipython_display_(self, include=None, exclude=None): from IPython.display import Image, display display(Image(self.to_string())) def to_raw(self): if self._raw is not None: return self._raw if self._path is not None: self._raw = Image.open(self._path) return self._raw if self._tensor is not None: array = self._tensor.cpu().detach().numpy() return Image.fromarray((255 - array * 255).astype(np.uint8)) def to_string(self): if self._path is not None: return self._path if self._raw is not None: directory = tempfile.mkdtemp() self._path = os.path.join(directory, str(uuid.uuid4()) + '.png') self._raw.save(self._path) return self._path if self._tensor is not None: array = self._tensor.cpu().detach().numpy() img = Image.fromarray((255 - array * 255).astype(np.uint8)) directory = tempfile.mkdtemp() self._path = os.path.join(directory, str(uuid.uuid4()) + '.png') img.save(self._path) return self._path def save(self, output_bytes, format, **params): img = self.to_raw() img.save(output_bytes, format, **params) class AgentAudio(AgentType, str): def __init__(self, value, samplerate=16000): super().__init__(value) if not is_soundfile_availble(): raise ImportError('soundfile must be installed in order to handle audio.') self._path = None self._tensor = None self.samplerate = samplerate if isinstance(value, (str, pathlib.Path)): self._path = value elif is_torch_available() and isinstance(value, torch.Tensor): self._tensor = value elif isinstance(value, tuple): self.samplerate = value[0] if isinstance(value[1], np.ndarray): self._tensor = torch.from_numpy(value[1]) else: self._tensor = torch.tensor(value[1]) else: raise ValueError(f'Unsupported audio 
type: {type(value)}') def _ipython_display_(self, include=None, exclude=None): from IPython.display import Audio, display display(Audio(self.to_string(), rate=self.samplerate)) def to_raw(self): if self._tensor is not None: return self._tensor if self._path is not None: (tensor, self.samplerate) = sf.read(self._path) self._tensor = torch.tensor(tensor) return self._tensor def to_string(self): if self._path is not None: return self._path if self._tensor is not None: directory = tempfile.mkdtemp() self._path = os.path.join(directory, str(uuid.uuid4()) + '.wav') sf.write(self._path, self._tensor, samplerate=self.samplerate) return self._path AGENT_TYPE_MAPPING = {'text': AgentText, 'image': AgentImage, 'audio': AgentAudio} INSTANCE_TYPE_MAPPING = {str: AgentText, ImageType: AgentImage} if is_torch_available(): INSTANCE_TYPE_MAPPING[Tensor] = AgentAudio def handle_agent_inputs(*args, **kwargs): args = [arg.to_raw() if isinstance(arg, AgentType) else arg for arg in args] kwargs = {k: v.to_raw() if isinstance(v, AgentType) else v for (k, v) in kwargs.items()} return (args, kwargs) def handle_agent_outputs(output, output_type=None): if output_type in AGENT_TYPE_MAPPING: decoded_outputs = AGENT_TYPE_MAPPING[output_type](output) return decoded_outputs else: for (_k, _v) in INSTANCE_TYPE_MAPPING.items(): if isinstance(output, _k): return _v(output) return output # File: transformers-main/src/transformers/agents/agents.py import json import logging import re from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union from .. import is_torch_available from ..utils import logging as transformers_logging from ..utils.import_utils import is_pygments_available from .agent_types import AgentAudio, AgentImage, AgentText from .default_tools import BASE_PYTHON_TOOLS, FinalAnswerTool, setup_default_tools from .llm_engine import HfApiEngine, MessageRole from .prompts import DEFAULT_CODE_SYSTEM_PROMPT, DEFAULT_REACT_CODE_SYSTEM_PROMPT, DEFAULT_REACT_JSON_SYSTEM_PROMPT, PLAN_UPDATE_FINAL_PLAN_REDACTION, PROMPTS_FOR_INITIAL_PLAN, PROMPTS_FOR_PLAN_UPDATE, SUPPORTED_PLAN_TYPES, SYSTEM_PROMPT_FACTS, SYSTEM_PROMPT_FACTS_UPDATE, USER_PROMPT_FACTS_UPDATE from .python_interpreter import LIST_SAFE_MODULES, evaluate_python_code from .tools import DEFAULT_TOOL_DESCRIPTION_TEMPLATE, Tool, get_tool_description_with_args, load_tool if is_pygments_available(): from pygments import highlight from pygments.formatters import Terminal256Formatter from pygments.lexers import PythonLexer class CustomFormatter(logging.Formatter): grey = '\x1b[38;20m' bold_yellow = '\x1b[33;1m' red = '\x1b[31;20m' green = '\x1b[32;20m' bold_green = '\x1b[32;20;1m' bold_red = '\x1b[31;1m' bold_white = '\x1b[37;1m' orange = '\x1b[38;5;214m' bold_orange = '\x1b[38;5;214;1m' reset = '\x1b[0m' format = '%(message)s' FORMATS = {logging.DEBUG: grey + format + reset, logging.INFO: format, logging.WARNING: bold_yellow + format + reset, logging.ERROR: red + format + reset, logging.CRITICAL: bold_red + format + reset, 31: reset + format + reset, 32: green + format + reset, 33: bold_green + format + reset, 34: bold_white + format + reset, 35: orange + format + reset, 36: bold_orange + format + reset} def format(self, record): log_fmt = self.FORMATS.get(record.levelno) formatter = logging.Formatter(log_fmt) return formatter.format(record) logger = transformers_logging.get_logger(__name__) logger.propagate = False ch = logging.StreamHandler() ch.setFormatter(CustomFormatter()) logger.addHandler(ch) def parse_json_blob(json_blob: str) -> Dict[str, 
str]: try: first_accolade_index = json_blob.find('{') last_accolade_index = [a.start() for a in list(re.finditer('}', json_blob))][-1] json_blob = json_blob[first_accolade_index:last_accolade_index + 1].replace('\\"', "'") json_data = json.loads(json_blob, strict=False) return json_data except json.JSONDecodeError as e: place = e.pos if json_blob[place - 1:place + 2] == '},\n': raise ValueError('JSON is invalid: you probably tried to provide multiple tool calls in one action. PROVIDE ONLY ONE TOOL CALL.') raise ValueError(f"The JSON blob you used is invalid due to the following error: {e}.\nJSON blob was: {json_blob}, decoding failed on that specific part of the blob:\n'{json_blob[place - 4:place + 5]}'.") except Exception as e: raise ValueError(f'Error in parsing the JSON blob: {e}') def parse_code_blob(code_blob: str) -> str: try: pattern = '```(?:py|python)?\\n(.*?)\\n```' match = re.search(pattern, code_blob, re.DOTALL) return match.group(1).strip() except Exception as e: raise ValueError(f'\nThe code blob you used is invalid: due to the following error: {e}\nThis means that the regex pattern {pattern} was not respected: make sure to include code with the correct pattern, for instance:\nThoughts: Your thoughts\nCode:\n```py\n# Your python code here\n```') def parse_json_tool_call(json_blob: str) -> Tuple[str, Dict[str, str]]: json_blob = json_blob.replace('```json', '').replace('```', '') tool_call = parse_json_blob(json_blob) if 'action' in tool_call and 'action_input' in tool_call: return (tool_call['action'], tool_call['action_input']) elif 'action' in tool_call: return (tool_call['action'], None) else: raise ValueError(f"Missing keys: {[key for key in ['action', 'action_input'] if key not in tool_call]} in blob {tool_call}") def parse_text_tool_call(text: str) -> Tuple[str, Union[str, Dict[str, str]]]: try: if 'Observation:' in text: text = text.split('Observation:')[0] if 'Action:' in text: text = text.split('Action:')[1] (tool_name, tool_input) = text.split('Action input:') if '{' in tool_input: tool_input = parse_json_blob(tool_input) else: tool_input = tool_input.strip().replace('"', '') return (tool_name.strip().replace('"', '').replace('\\', ''), tool_input) except Exception as e: raise ValueError(f'Error in parsing the text tool call: {e}. Be sure to provide the correct format. 
DO NOT repeat your previous incorrect tool call.') def to_text(input: Union[List[Dict[str, str]], Dict[str, str], str]) -> str: if isinstance(input, list): return '\n'.join([m['content'] for m in input]) elif isinstance(input, dict): return input['content'] else: return input HUGGINGFACE_DEFAULT_TOOLS = {} _tools_are_initialized = False class Toolbox: def __init__(self, tools: List[Tool], add_base_tools: bool=False): self._tools = {tool.name: tool for tool in tools} if add_base_tools: self.add_base_tools() self._load_tools_if_needed() def add_base_tools(self, add_python_interpreter: bool=False): global _tools_are_initialized global HUGGINGFACE_DEFAULT_TOOLS if not _tools_are_initialized: HUGGINGFACE_DEFAULT_TOOLS = setup_default_tools(logger) _tools_are_initialized = True for tool in HUGGINGFACE_DEFAULT_TOOLS.values(): if tool.name != 'python_interpreter' or add_python_interpreter: self.add_tool(tool) self._load_tools_if_needed() @property def tools(self) -> Dict[str, Tool]: return self._tools def show_tool_descriptions(self, tool_description_template: str=None) -> str: return '\n'.join([get_tool_description_with_args(tool, tool_description_template) for tool in self._tools.values()]) def add_tool(self, tool: Tool): if tool.name in self._tools: raise KeyError(f"Error: tool '{tool.name}' already exists in the toolbox.") self._tools[tool.name] = tool def remove_tool(self, tool_name: str): if tool_name not in self._tools: raise KeyError(f'Error: tool {tool_name} not found in toolbox for removal, should be instead one of {list(self._tools.keys())}.') del self._tools[tool_name] def update_tool(self, tool: Tool): if tool.name not in self._tools: raise KeyError(f'Error: tool {tool.name} not found in toolbox for update, should be instead one of {list(self._tools.keys())}.') self._tools[tool.name] = tool def clear_toolbox(self): self._tools = {} def _load_tools_if_needed(self): for (name, tool) in self._tools.items(): if not isinstance(tool, Tool): task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id self._tools[name] = load_tool(task_or_repo_id) def __repr__(self): toolbox_description = 'Toolbox contents:\n' for tool in self._tools.values(): toolbox_description += f'\t{tool.name}: {tool.description}\n' return toolbox_description class AgentError(Exception): def __init__(self, message): super().__init__(message) self.message = message class AgentParsingError(AgentError): pass class AgentExecutionError(AgentError): pass class AgentMaxIterationsError(AgentError): pass class AgentGenerationError(AgentError): pass def format_prompt_with_tools(toolbox: Toolbox, prompt_template: str, tool_description_template: str) -> str: tool_descriptions = toolbox.show_tool_descriptions(tool_description_template) prompt = prompt_template.replace('<>', tool_descriptions) if '<>' in prompt: tool_names = [f"'{tool_name}'" for tool_name in toolbox.tools.keys()] prompt = prompt.replace('<>', ', '.join(tool_names)) return prompt def show_agents_descriptions(managed_agents: list): managed_agents_descriptions = "\nYou can also give requests to team members.\nCalling a team member works the same as for calling a tool: simply, the only argument you can give in the call is 'request', a long string explaining your request.\nGiven that this team member is a real human, you should be very verbose in your request.\nHere is a list of the team members that you can call:" for agent in managed_agents.values(): managed_agents_descriptions += f'\n- {agent.name}: {agent.description}' return managed_agents_descriptions def 
format_prompt_with_managed_agents_descriptions(prompt_template, managed_agents=None) -> str: if managed_agents is not None: return prompt_template.replace('<>', show_agents_descriptions(managed_agents)) else: return prompt_template.replace('<>', '') def format_prompt_with_imports(prompt_template: str, authorized_imports: List[str]) -> str: if '<>' not in prompt_template: raise AgentError("Tag '<>' should be provided in the prompt.") return prompt_template.replace('<>', str(authorized_imports)) class Agent: def __init__(self, tools: Union[List[Tool], Toolbox], llm_engine: Callable=HfApiEngine(), system_prompt=DEFAULT_REACT_CODE_SYSTEM_PROMPT, tool_description_template=None, additional_args={}, max_iterations: int=6, tool_parser=parse_json_tool_call, add_base_tools: bool=False, verbose: int=0, grammar: Dict[str, str]=None, managed_agents: List=None): self.agent_name = self.__class__.__name__ self.llm_engine = llm_engine self.system_prompt_template = system_prompt self.tool_description_template = tool_description_template if tool_description_template else DEFAULT_TOOL_DESCRIPTION_TEMPLATE self.additional_args = additional_args self.max_iterations = max_iterations self.logger = logger self.tool_parser = tool_parser self.grammar = grammar self.managed_agents = None if managed_agents is not None: self.managed_agents = {agent.name: agent for agent in managed_agents} if isinstance(tools, Toolbox): self._toolbox = tools if add_base_tools: if not is_torch_available(): raise ImportError('Using the base tools requires torch to be installed.') self._toolbox.add_base_tools(add_python_interpreter=self.__class__ == ReactJsonAgent) else: self._toolbox = Toolbox(tools, add_base_tools=add_base_tools) self._toolbox.add_tool(FinalAnswerTool()) self.system_prompt = format_prompt_with_tools(self._toolbox, self.system_prompt_template, self.tool_description_template) self.system_prompt = format_prompt_with_managed_agents_descriptions(self.system_prompt, self.managed_agents) self.prompt = None self.logs = [] self.task = None if verbose == 0: logger.setLevel(logging.WARNING) elif verbose == 1: logger.setLevel(logging.INFO) elif verbose == 2: logger.setLevel(logging.DEBUG) @property def toolbox(self) -> Toolbox: return self._toolbox def initialize_for_run(self): self.token_count = 0 self.system_prompt = format_prompt_with_tools(self._toolbox, self.system_prompt_template, self.tool_description_template) self.system_prompt = format_prompt_with_managed_agents_descriptions(self.system_prompt, self.managed_agents) if hasattr(self, 'authorized_imports'): self.system_prompt = format_prompt_with_imports(self.system_prompt, list(set(LIST_SAFE_MODULES) | set(self.authorized_imports))) self.logs = [{'system_prompt': self.system_prompt, 'task': self.task}] self.logger.log(33, '======== New task ========') self.logger.log(34, self.task) self.logger.debug('System prompt is as follows:') self.logger.debug(self.system_prompt) def write_inner_memory_from_logs(self, summary_mode: Optional[bool]=False) -> List[Dict[str, str]]: prompt_message = {'role': MessageRole.SYSTEM, 'content': self.logs[0]['system_prompt']} task_message = {'role': MessageRole.USER, 'content': 'Task: ' + self.logs[0]['task']} if summary_mode: memory = [task_message] else: memory = [prompt_message, task_message] for (i, step_log) in enumerate(self.logs[1:]): if 'llm_output' in step_log and (not summary_mode): thought_message = {'role': MessageRole.ASSISTANT, 'content': step_log['llm_output'].strip()} memory.append(thought_message) if 'facts' in step_log: 
thought_message = {'role': MessageRole.ASSISTANT, 'content': '[FACTS LIST]:\n' + step_log['facts'].strip()} memory.append(thought_message) if 'plan' in step_log and (not summary_mode): thought_message = {'role': MessageRole.ASSISTANT, 'content': '[PLAN]:\n' + step_log['plan'].strip()} memory.append(thought_message) if 'tool_call' in step_log and summary_mode: tool_call_message = {'role': MessageRole.ASSISTANT, 'content': f'[STEP {i} TOOL CALL]: ' + str(step_log['tool_call']).strip()} memory.append(tool_call_message) if 'task' in step_log: tool_call_message = {'role': MessageRole.USER, 'content': 'New task:\n' + step_log['task']} memory.append(tool_call_message) if 'error' in step_log or 'observation' in step_log: if 'error' in step_log: message_content = f'[OUTPUT OF STEP {i}] -> Error:\n' + str(step_log['error']) + "\nNow let's retry: take care not to repeat previous errors! If you have retried several times, try a completely different approach.\n" elif 'observation' in step_log: message_content = f"[OUTPUT OF STEP {i}] -> Observation:\n{step_log['observation']}" tool_response_message = {'role': MessageRole.TOOL_RESPONSE, 'content': message_content} memory.append(tool_response_message) return memory def get_succinct_logs(self): return [{key: value for (key, value) in log.items() if key != 'agent_memory'} for log in self.logs] def extract_action(self, llm_output: str, split_token: str) -> str: try: split = llm_output.split(split_token) (rationale, action) = (split[-2], split[-1]) except Exception as e: self.logger.error(e, exc_info=1) raise AgentParsingError(f"Error: No '{split_token}' token provided in your output.\nYour output:\n{llm_output}\n. Be sure to include an action, prefaced with '{split_token}'!") return (rationale.strip(), action.strip()) def execute_tool_call(self, tool_name: str, arguments: Dict[str, str]) -> Any: available_tools = self.toolbox.tools if self.managed_agents is not None: available_tools = {**available_tools, **self.managed_agents} if tool_name not in available_tools: error_msg = f'Error: unknown tool {tool_name}, should be instead one of {list(available_tools.keys())}.' 
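# Added commentary (not in the original source): in the dict branch below, any string argument
# whose value matches a key in `self.state` (for example 'image.png' stored by an earlier step)
# is swapped for the stored object before the tool is called, so tools receive real Python
# objects rather than the placeholder names the LLM wrote.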
self.logger.error(error_msg, exc_info=1) raise AgentExecutionError(error_msg) try: if isinstance(arguments, str): observation = available_tools[tool_name](arguments) elif isinstance(arguments, dict): for (key, value) in arguments.items(): if isinstance(value, str) and value in self.state: arguments[key] = self.state[value] observation = available_tools[tool_name](**arguments) else: raise AgentExecutionError(f'Arguments passed to tool should be a dict or string: got a {type(arguments)}.') return observation except Exception as e: if tool_name in self.toolbox.tools: raise AgentExecutionError(f"Error in tool call execution: {e}\nYou should only use this tool with a correct input.\nAs a reminder, this tool's description is the following:\n{get_tool_description_with_args(available_tools[tool_name])}") elif tool_name in self.managed_agents: raise AgentExecutionError(f"Error in calling team member: {e}\nYou should only ask this team member with a correct request.\nAs a reminder, this team member's description is the following:\n{available_tools[tool_name]}") def log_rationale_code_action(self, rationale: str, code_action: str) -> None: self.logger.warning('=== Agent thoughts:') self.logger.log(31, rationale) self.logger.warning('>>> Agent is executing the code below:') if is_pygments_available(): self.logger.log(31, highlight(code_action, PythonLexer(ensurenl=False), Terminal256Formatter(style='nord'))) else: self.logger.log(31, code_action) self.logger.warning('====') def run(self, **kwargs): raise NotImplementedError class CodeAgent(Agent): def __init__(self, tools: List[Tool], llm_engine: Callable=HfApiEngine(), system_prompt: str=DEFAULT_CODE_SYSTEM_PROMPT, tool_description_template: str=DEFAULT_TOOL_DESCRIPTION_TEMPLATE, grammar: Dict[str, str]=None, additional_authorized_imports: Optional[List[str]]=None, **kwargs): super().__init__(tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, **kwargs) if not is_pygments_available(): transformers_logging.warning_once(logger, "pygments isn't installed. Installing pygments will enable color syntax highlighting in the CodeAgent.") self.python_evaluator = evaluate_python_code self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else [] self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(self.additional_authorized_imports)) self.system_prompt = self.system_prompt.replace('<>', str(self.authorized_imports)) def parse_code_blob(self, result: str) -> str: return parse_code_blob(result) def run(self, task: str, return_generated_code: bool=False, **kwargs): self.task = task if len(kwargs) > 0: self.task += f'\nYou have been provided with these initial arguments: {str(kwargs)}.' self.state = kwargs.copy() self.initialize_for_run() prompt_message = {'role': MessageRole.SYSTEM, 'content': self.system_prompt} task_message = {'role': MessageRole.USER, 'content': 'Task: ' + self.task} self.prompt = [prompt_message, task_message] self.logger.info('====Executing with this prompt====') self.logger.info(self.prompt) additional_args = {'grammar': self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine(self.prompt, stop_sequences=[''], **additional_args) if return_generated_code: return llm_output try: (rationale, code_action) = self.extract_action(llm_output=llm_output, split_token='Code:') except Exception as e: self.logger.debug(f'Error in extracting action, trying to parse the whole output as code. 
Error trace: {e}') (rationale, code_action) = ('', llm_output) try: code_action = self.parse_code_blob(code_action) except Exception as e: error_msg = f'Error in code parsing: {e}. Be sure to provide correct code' self.logger.error(error_msg, exc_info=1) return error_msg self.log_rationale_code_action(rationale, code_action) try: available_tools = {**BASE_PYTHON_TOOLS.copy(), **self.toolbox.tools} output = self.python_evaluator(code_action, static_tools=available_tools, custom_tools={}, state=self.state, authorized_imports=self.authorized_imports) self.logger.info(self.state['print_outputs']) return output except Exception as e: error_msg = f'Error in execution: {e}. Be sure to provide correct code.' self.logger.error(error_msg, exc_info=1) return error_msg class ReactAgent(Agent): def __init__(self, tools: List[Tool], llm_engine: Callable=HfApiEngine(), system_prompt: str=DEFAULT_REACT_CODE_SYSTEM_PROMPT, tool_description_template: str=DEFAULT_TOOL_DESCRIPTION_TEMPLATE, grammar: Dict[str, str]=None, plan_type: Literal[tuple(SUPPORTED_PLAN_TYPES)]=SUPPORTED_PLAN_TYPES[0], planning_interval: Optional[int]=None, **kwargs): assert plan_type in SUPPORTED_PLAN_TYPES, f'plan type {plan_type} is not supported' super().__init__(tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, **kwargs) self.planning_interval = planning_interval self.plan_type = plan_type def provide_final_answer(self, task) -> str: self.prompt = [{'role': MessageRole.SYSTEM, 'content': "An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:"}] self.prompt += self.write_inner_memory_from_logs()[1:] self.prompt += [{'role': MessageRole.USER, 'content': f'Based on the above, please provide an answer to the following user request:\n{task}'}] try: return self.llm_engine(self.prompt) except Exception as e: return f'Error in generating final llm output: {e}.' def run(self, task: str, stream: bool=False, reset: bool=True, **kwargs): self.task = task if len(kwargs) > 0: self.task += f'\nYou have been provided with these initial arguments: {str(kwargs)}.' self.state = kwargs.copy() if reset: self.initialize_for_run() else: self.logs.append({'task': task}) if stream: return self.stream_run(task) else: return self.direct_run(task) def stream_run(self, task: str): final_answer = None iteration = 0 while final_answer is None and iteration < self.max_iterations: try: step_logs = self.step() if 'final_answer' in step_logs: final_answer = step_logs['final_answer'] except AgentError as e: self.logger.error(e, exc_info=1) self.logs[-1]['error'] = e finally: iteration += 1 yield self.logs[-1] if final_answer is None and iteration == self.max_iterations: error_message = 'Reached max iterations.' 
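# Added commentary (not in the original source): when the loop exhausts `max_iterations` without
# a final answer, the agent records an AgentMaxIterationsError and falls back to
# `provide_final_answer`, which asks the LLM for a best-effort answer built from the logged
# memory. `direct_run` below applies the same fallback but returns the answer instead of
# yielding it.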
final_step_log = {'error': AgentMaxIterationsError(error_message)} self.logs.append(final_step_log) self.logger.error(error_message, exc_info=1) final_answer = self.provide_final_answer(task) final_step_log['final_answer'] = final_answer yield final_step_log yield final_answer def direct_run(self, task: str): final_answer = None iteration = 0 while final_answer is None and iteration < self.max_iterations: try: if self.planning_interval is not None and iteration % self.planning_interval == 0: self.planning_step(task, is_first_step=iteration == 0, iteration=iteration) step_logs = self.step() if 'final_answer' in step_logs: final_answer = step_logs['final_answer'] except AgentError as e: self.logger.error(e, exc_info=1) self.logs[-1]['error'] = e finally: iteration += 1 if final_answer is None and iteration == self.max_iterations: error_message = 'Reached max iterations.' final_step_log = {'error': AgentMaxIterationsError(error_message)} self.logs.append(final_step_log) self.logger.error(error_message, exc_info=1) final_answer = self.provide_final_answer(task) final_step_log['final_answer'] = final_answer return final_answer def planning_step(self, task, is_first_step: bool=False, iteration: int=None): if is_first_step: message_prompt_facts = {'role': MessageRole.SYSTEM, 'content': SYSTEM_PROMPT_FACTS} message_prompt_task = {'role': MessageRole.USER, 'content': f'Here is the task:\n```\n{task}\n```\nNow begin!'} answer_facts = self.llm_engine([message_prompt_facts, message_prompt_task]) message_system_prompt_plan = {'role': MessageRole.SYSTEM, 'content': PROMPTS_FOR_INITIAL_PLAN[self.plan_type]['system']} message_user_prompt_plan = {'role': MessageRole.USER, 'content': PROMPTS_FOR_INITIAL_PLAN[self.plan_type]['user'].format(task=task, tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template), managed_agents_descriptions=show_agents_descriptions(self.managed_agents) if self.managed_agents is not None else '', answer_facts=answer_facts)} answer_plan = self.llm_engine([message_system_prompt_plan, message_user_prompt_plan], stop_sequences=['']) final_plan_redaction = f'Here is the plan of action that I will follow to solve the task:\n```\n{answer_plan}\n```' final_facts_redaction = f'Here are the facts that I know so far:\n```\n{answer_facts}\n```'.strip() self.logs.append({'plan': final_plan_redaction, 'facts': final_facts_redaction}) self.logger.log(36, '===== Initial plan =====') self.logger.log(35, final_plan_redaction) else: agent_memory = self.write_inner_memory_from_logs(summary_mode=False) facts_update_system_prompt = {'role': MessageRole.SYSTEM, 'content': SYSTEM_PROMPT_FACTS_UPDATE} facts_update_message = {'role': MessageRole.USER, 'content': USER_PROMPT_FACTS_UPDATE} facts_update = self.llm_engine([facts_update_system_prompt] + agent_memory + [facts_update_message]) plan_update_message = {'role': MessageRole.SYSTEM, 'content': PROMPTS_FOR_PLAN_UPDATE[self.plan_type]['system'].format(task=task)} plan_update_message_user = {'role': MessageRole.USER, 'content': PROMPTS_FOR_PLAN_UPDATE[self.plan_type]['user'].format(task=task, tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template), managed_agents_descriptions=show_agents_descriptions(self.managed_agents) if self.managed_agents is not None else '', facts_update=facts_update, remaining_steps=self.max_iterations - iteration)} plan_update = self.llm_engine([plan_update_message] + agent_memory + [plan_update_message_user], stop_sequences=['']) final_plan_redaction = 
PLAN_UPDATE_FINAL_PLAN_REDACTION.format(task=task, plan_update=plan_update) final_facts_redaction = f'Here is the updated list of the facts that I know:\n```\n{facts_update}\n```' self.logs.append({'plan': final_plan_redaction, 'facts': final_facts_redaction}) self.logger.log(36, '===== Updated plan =====') self.logger.log(35, final_plan_redaction) class ReactJsonAgent(ReactAgent): def __init__(self, tools: List[Tool], llm_engine: Callable=HfApiEngine(), system_prompt: str=DEFAULT_REACT_JSON_SYSTEM_PROMPT, tool_description_template: str=DEFAULT_TOOL_DESCRIPTION_TEMPLATE, grammar: Dict[str, str]=None, planning_interval: Optional[int]=None, **kwargs): super().__init__(tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, planning_interval=planning_interval, **kwargs) def step(self): agent_memory = self.write_inner_memory_from_logs() self.prompt = agent_memory self.logger.debug('===== New step =====') current_step_logs = {} self.logs.append(current_step_logs) current_step_logs['agent_memory'] = agent_memory.copy() self.logger.info('===== Calling LLM with this last message: =====') self.logger.info(self.prompt[-1]) try: additional_args = {'grammar': self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine(self.prompt, stop_sequences=['', 'Observation:'], **additional_args) except Exception as e: raise AgentGenerationError(f'Error in generating llm output: {e}.') self.logger.debug('===== Output message of the LLM: =====') self.logger.debug(llm_output) current_step_logs['llm_output'] = llm_output self.logger.debug('===== Extracting action =====') (rationale, action) = self.extract_action(llm_output=llm_output, split_token='Action:') try: (tool_name, arguments) = self.tool_parser(action) except Exception as e: raise AgentParsingError(f'Could not parse the given action: {e}.') current_step_logs['rationale'] = rationale current_step_logs['tool_call'] = {'tool_name': tool_name, 'tool_arguments': arguments} self.logger.warning('=== Agent thoughts:') self.logger.log(31, rationale) self.logger.warning(f">>> Calling tool: '{tool_name}' with arguments: {arguments}") if tool_name == 'final_answer': if isinstance(arguments, dict): if 'answer' in arguments: answer = arguments['answer'] if isinstance(answer, str) and answer in self.state.keys(): answer = self.state[answer] else: answer = arguments else: answer = arguments current_step_logs['final_answer'] = answer return current_step_logs else: if arguments is None: arguments = {} observation = self.execute_tool_call(tool_name, arguments) observation_type = type(observation) if observation_type == AgentText: updated_information = str(observation).strip() else: if observation_type == AgentImage: observation_name = 'image.png' elif observation_type == AgentAudio: observation_name = 'audio.mp3' else: observation_name = 'object.object' self.state[observation_name] = observation updated_information = f"Stored '{observation_name}' in memory." 
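# Added commentary (not in the original source): non-text observations (AgentImage, AgentAudio,
# or any other object) are not inlined into the prompt; they are stored in `self.state` under a
# short name such as 'image.png' or 'audio.mp3', and only that name is reported back to the LLM,
# which can pass it as a tool argument in a later step (see the state substitution in
# `execute_tool_call`).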
self.logger.info(updated_information) current_step_logs['observation'] = updated_information return current_step_logs class ReactCodeAgent(ReactAgent): def __init__(self, tools: List[Tool], llm_engine: Callable=HfApiEngine(), system_prompt: str=DEFAULT_REACT_CODE_SYSTEM_PROMPT, tool_description_template: str=DEFAULT_TOOL_DESCRIPTION_TEMPLATE, grammar: Dict[str, str]=None, additional_authorized_imports: Optional[List[str]]=None, planning_interval: Optional[int]=None, **kwargs): super().__init__(tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, planning_interval=planning_interval, **kwargs) if not is_pygments_available(): transformers_logging.warning_once(logger, "pygments isn't installed. Installing pygments will enable color syntax highlighting in the ReactCodeAgent.") self.python_evaluator = evaluate_python_code self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else [] self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(self.additional_authorized_imports)) self.system_prompt = self.system_prompt.replace('<>', str(self.authorized_imports)) self.custom_tools = {} def step(self): agent_memory = self.write_inner_memory_from_logs() self.prompt = agent_memory.copy() self.logger.debug('===== New step =====') current_step_logs = {} self.logs.append(current_step_logs) current_step_logs['agent_memory'] = agent_memory.copy() self.logger.info('===== Calling LLM with these last messages: =====') self.logger.info(self.prompt[-2:]) try: additional_args = {'grammar': self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine(self.prompt, stop_sequences=['', 'Observation:'], **additional_args) except Exception as e: raise AgentGenerationError(f'Error in generating llm output: {e}.') self.logger.debug('=== Output message of the LLM:') self.logger.debug(llm_output) current_step_logs['llm_output'] = llm_output self.logger.debug('=== Extracting action ===') try: (rationale, raw_code_action) = self.extract_action(llm_output=llm_output, split_token='Code:') except Exception as e: self.logger.debug(f'Error in extracting action, trying to parse the whole output. Error trace: {e}') (rationale, raw_code_action) = (llm_output, llm_output) try: code_action = parse_code_blob(raw_code_action) except Exception as e: error_msg = f'Error in code parsing: {e}. 
Make sure to provide correct code' raise AgentParsingError(error_msg) current_step_logs['rationale'] = rationale current_step_logs['tool_call'] = {'tool_name': 'code interpreter', 'tool_arguments': code_action} self.log_rationale_code_action(rationale, code_action) try: static_tools = {**BASE_PYTHON_TOOLS.copy(), **self.toolbox.tools} if self.managed_agents is not None: static_tools = {**static_tools, **self.managed_agents} result = self.python_evaluator(code_action, static_tools=static_tools, custom_tools=self.custom_tools, state=self.state, authorized_imports=self.authorized_imports) self.logger.warning('Print outputs:') self.logger.log(32, self.state['print_outputs']) if result is not None: self.logger.warning('Last output from code snippet:') self.logger.log(32, str(result)) observation = 'Print outputs:\n' + self.state['print_outputs'] if result is not None: observation += 'Last output from code snippet:\n' + str(result)[:100000] current_step_logs['observation'] = observation except Exception as e: error_msg = f'Code execution failed due to the following error:\n{str(e)}' if "'dict' object has no attribute 'read'" in str(e): error_msg += '\nYou get this error because you passed a dict as input for one of the arguments instead of a string.' raise AgentExecutionError(error_msg) for line in code_action.split('\n'): if line[:len('final_answer')] == 'final_answer': self.logger.log(33, 'Final answer:') self.logger.log(32, result) current_step_logs['final_answer'] = result return current_step_logs class ManagedAgent: def __init__(self, agent, name, description, additional_prompting=None, provide_run_summary=False): self.agent = agent self.name = name self.description = description self.additional_prompting = additional_prompting self.provide_run_summary = provide_run_summary def write_full_task(self, task): full_task = f"You're a helpful agent named '{self.name}'.\nYou have been submitted this task by your manager.\n---\nTask:\n{task}\n---\nYou're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible so that they have a clear understanding of the answer.\n\nYour final_answer WILL HAVE to contain these parts:\n### 1. Task outcome (short version):\n### 2. Task outcome (extremely detailed version):\n### 3. Additional context (if relevant):\n\nPut all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost.\nAnd even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback.\n<>" if self.additional_prompting: full_task = full_task.replace('\n<>', self.additional_prompting).strip() else: full_task = full_task.replace('\n<>', '').strip() return full_task def __call__(self, request, **kwargs): full_task = self.write_full_task(request) output = self.agent.run(full_task, **kwargs) if self.provide_run_summary: answer = f"Here is the final answer from your managed agent '{self.name}':\n" answer += str(output) answer += f"\n\nFor more detail, find below a summary of this agent's work:\nSUMMARY OF WORK FROM AGENT '{self.name}':\n" for message in self.agent.write_inner_memory_from_logs(summary_mode=True): content = message['content'] if len(str(content)) < 1000 or '[FACTS LIST]' in str(content): answer += '\n' + str(content) + '\n---' else: answer += '\n' + str(content)[:1000] + '\n(...Step was truncated because too long)...\n---' answer += f"\nEND OF SUMMARY OF WORK FROM AGENT '{self.name}'." 
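# Usage sketch (added illustration, not in the original source; `my_search_tool` and the agent
# wiring below are hypothetical): a manager agent calls a ManagedAgent by name, exactly like a
# tool, passing a single 'request' string.
#
#   search_agent = ReactJsonAgent(tools=[my_search_tool], llm_engine=HfApiEngine())
#   managed_search = ManagedAgent(agent=search_agent, name='search_agent',
#                                 description='Answers web-search style questions.',
#                                 provide_run_summary=True)
#   manager = ReactCodeAgent(tools=[], llm_engine=HfApiEngine(), managed_agents=[managed_search])
#   manager.run('Find the capital of Australia and report it.')
#
# With provide_run_summary=True the manager receives the final answer plus a truncated transcript
# of the managed agent's intermediate steps; otherwise only the raw run() output is returned.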
return answer else: return output # File: transformers-main/src/transformers/agents/default_tools.py import importlib.util import json import math from dataclasses import dataclass from math import sqrt from typing import Dict from huggingface_hub import hf_hub_download, list_spaces from ..utils import is_offline_mode from .python_interpreter import LIST_SAFE_MODULES, evaluate_python_code from .tools import TOOL_CONFIG_FILE, TOOL_MAPPING, Tool def custom_print(*args): return None BASE_PYTHON_TOOLS = {'print': custom_print, 'isinstance': isinstance, 'range': range, 'float': float, 'int': int, 'bool': bool, 'str': str, 'set': set, 'list': list, 'dict': dict, 'tuple': tuple, 'round': round, 'ceil': math.ceil, 'floor': math.floor, 'log': math.log, 'exp': math.exp, 'sin': math.sin, 'cos': math.cos, 'tan': math.tan, 'asin': math.asin, 'acos': math.acos, 'atan': math.atan, 'atan2': math.atan2, 'degrees': math.degrees, 'radians': math.radians, 'pow': math.pow, 'sqrt': sqrt, 'len': len, 'sum': sum, 'max': max, 'min': min, 'abs': abs, 'enumerate': enumerate, 'zip': zip, 'reversed': reversed, 'sorted': sorted, 'all': all, 'any': any, 'map': map, 'filter': filter, 'ord': ord, 'chr': chr, 'next': next, 'iter': iter, 'divmod': divmod, 'callable': callable, 'getattr': getattr, 'hasattr': hasattr, 'setattr': setattr, 'issubclass': issubclass, 'type': type} @dataclass class PreTool: name: str inputs: Dict[str, str] output_type: type task: str description: str repo_id: str HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB = ['image-transformation', 'text-to-image'] def get_remote_tools(logger, organization='huggingface-tools'): if is_offline_mode(): logger.info('You are in offline mode, so remote tools are not available.') return {} spaces = list_spaces(author=organization) tools = {} for space_info in spaces: repo_id = space_info.id resolved_config_file = hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type='space') with open(resolved_config_file, encoding='utf-8') as reader: config = json.load(reader) task = repo_id.split('/')[-1] tools[config['name']] = PreTool(task=task, description=config['description'], repo_id=repo_id, name=task, inputs=config['inputs'], output_type=config['output_type']) return tools def setup_default_tools(logger): default_tools = {} main_module = importlib.import_module('transformers') tools_module = main_module.agents for (task_name, tool_class_name) in TOOL_MAPPING.items(): tool_class = getattr(tools_module, tool_class_name) tool_instance = tool_class() default_tools[tool_class.name] = PreTool(name=tool_instance.name, inputs=tool_instance.inputs, output_type=tool_instance.output_type, task=task_name, description=tool_instance.description, repo_id=None) return default_tools class PythonInterpreterTool(Tool): name = 'python_interpreter' description = 'This is a tool that evaluates python code. It can be used to perform calculations.' output_type = 'text' available_tools = BASE_PYTHON_TOOLS.copy() def __init__(self, *args, authorized_imports=None, **kwargs): if authorized_imports is None: self.authorized_imports = list(set(LIST_SAFE_MODULES)) else: self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(authorized_imports)) self.inputs = {'code': {'type': 'text', 'description': f'The code snippet to evaluate. All variables used in this snippet must be defined in this same snippet, else you will get an error. 
This code can only import the following python libraries: {authorized_imports}.'}} super().__init__(*args, **kwargs) def forward(self, code): output = str(evaluate_python_code(code, static_tools=self.available_tools, authorized_imports=self.authorized_imports)) return output class FinalAnswerTool(Tool): name = 'final_answer' description = 'Provides a final answer to the given problem.' inputs = {'answer': {'type': 'text', 'description': 'The final answer to the problem'}} output_type = 'any' def forward(self, answer): return answer # File: transformers-main/src/transformers/agents/document_question_answering.py import re import numpy as np import torch from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .tools import PipelineTool if is_vision_available(): from PIL import Image class DocumentQuestionAnsweringTool(PipelineTool): default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa' description = 'This is a tool that answers a question about a document (pdf). It returns a text that contains the answer to the question.' name = 'document_qa' pre_processor_class = AutoProcessor model_class = VisionEncoderDecoderModel inputs = {'document': {'type': 'image', 'description': 'The image containing the information. Can be a PIL Image or a string path to the image.'}, 'question': {'type': 'text', 'description': 'The question in English'}} output_type = 'text' def __init__(self, *args, **kwargs): if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.') super().__init__(*args, **kwargs) def encode(self, document: 'Image', question: str): task_prompt = '{user_input}' prompt = task_prompt.replace('{user_input}', question) decoder_input_ids = self.pre_processor.tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids if isinstance(document, str): img = Image.open(document).convert('RGB') img_array = np.array(img).transpose(2, 0, 1) document = torch.from_numpy(img_array) pixel_values = self.pre_processor(document, return_tensors='pt').pixel_values return {'decoder_input_ids': decoder_input_ids, 'pixel_values': pixel_values} def forward(self, inputs): return self.model.generate(inputs['pixel_values'].to(self.device), decoder_input_ids=inputs['decoder_input_ids'].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True).sequences def decode(self, outputs): sequence = self.pre_processor.batch_decode(outputs)[0] sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '') sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '') sequence = re.sub('<.*?>', '', sequence, count=1).strip() sequence = self.pre_processor.token2json(sequence) return sequence['answer'] # File: transformers-main/src/transformers/agents/evaluate_agent.py from .agents import BASE_PYTHON_TOOLS from .python_interpreter import InterpreterError, evaluate def classifier(text, labels): return f'This is the classification of {text} along {labels}.' def translator(text, src_lang, tgt_lang): return f'This is the translation of {text} from {src_lang} to {tgt_lang}.' def speaker(text): return f'This is actually a sound reading {text}.' 
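# Added illustration (not in the original source): the functions in this module are notional
# tools that only return descriptive strings, so expected tool chains can be checked textually:
#   >>> speaker('a short summary')
#   'This is actually a sound reading a short summary.'
#   >>> transcriber(speaker('a short summary'))
#   'This is the transcribed text from This is actually a sound reading a short summary.'
# `transcriber` raises a ValueError when its input does not mention 'sound', which catches
# agents that chain these tools in an implausible order.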
def transcriber(audio): if 'sound' not in audio: raise ValueError(f'`audio` ({audio}) is not a sound.') return f'This is the transcribed text from {audio}.' def image_generator(prompt): return f'This is actually an image representing {prompt}.' def image_captioner(image): if 'image' not in image: raise ValueError(f'`image` ({image}) is not an image.') return f'This is a description of {image}.' def image_transformer(image, prompt): if 'image' not in image: raise ValueError(f'`image` ({image}) is not an image.') return f'This is a transformation of {image} according to {prompt}.' def question_answerer(text, question): return f'This is the answer to {question} from {text}.' def image_qa(image, question): if 'image' not in image: raise ValueError(f'`image` ({image}) is not an image.') return f'This is the answer to {question} from {image}.' def text_downloader(url): return f'This is the content of {url}.' def summarizer(text): return f'This is a summary of {text}.' def video_generator(prompt, seconds=2): return f'A video of {prompt}' def document_qa(image, question): return f'This is the answer to {question} from the document {image}.' def image_segmenter(image, prompt): return f'This is the mask of {prompt} in {image}' TEST_TOOLS = {'text_classifier': classifier, 'translator': translator, 'text_reader': speaker, 'summarizer': summarizer, 'transcriber': transcriber, 'image_generator': image_generator, 'image_captioner': image_captioner, 'image_transformer': image_transformer, 'text_qa': question_answerer, 'text_downloader': text_downloader, 'image_qa': image_qa, 'video_generator': video_generator, 'document_qa': document_qa, 'image_segmenter': image_segmenter} class Problem: def __init__(self, task, inputs, answer): self.task = task self.inputs = inputs self.answer = answer EVALUATION_TASKS = [Problem(task=['Is the following `text` (in Spanish) positive or negative?', 'Is the text in the variable `text` (in Spanish) positive or negative?', 'Translate the following `text` from Spanish to English then tell me if its positive or negative.'], inputs=['text'], answer='text_classifier(translator(text, src_lang="Spanish", tgt_lang="English"), labels=["positive", "negative"])'), Problem(task=['Tell me out loud what the `image` contains.', 'Describe the following `image` out loud.', 'Find what is in the picture stored in `image` then read it out loud.'], inputs=['image'], answer=['text_reader(image_captioner(image))', "text_reader(image_qa(image, question='What is in the image?'))"]), Problem(task=['Generate an image from the text given in `text_input`. Then transform it according to the text in `prompt`.', 'Use the following `text_input` to generate an image, then transform it by using the text in `prompt`.'], inputs=['text_input', 'prompt'], answer='image_transformer(image_generator(text_input), prompt)'), Problem(task=['Download the content of `url`, summarize it then generate an image from its content.', 'Use a summary of the web page at `url` to generate an image.', 'Summarize the content of the web page at `url`, and use the result to generate an image.'], inputs=['url'], answer='image_generator(summarizer(text_downloader(url)))'), Problem(task=['Transform the following `image` using the prompt in `text`. 
The prompt is in Spanish.', 'Use the text prompt in `text` (in Spanish) to transform the following `image`.', 'Translate the `text` from Spanish to English then use it to transform the picture in `image`.'], inputs=['text', 'image'], answer="image_transformer(image, translator(text, src_lang='Spanish', tgt_lang='English'))"), Problem(task=['Download the content of `url`, summarize it then read it out loud to me.', 'Read me a summary of the web page at `url`.'], inputs=['url'], answer='text_reader(summarizer(text_downloader(url)))'), Problem(task=['Generate an image from the text given in `text_input`.'], inputs=['text_input'], answer='image_generator(text_input)'), Problem(task=['Replace the beaver in the `image` by the `prompt`.', 'Transform the `image` so that it contains the `prompt`.', 'Use `prompt` to transform this `image`.'], inputs=['image', 'prompt'], answer='image_transformer(image, prompt)'), Problem(task=['Provide me the summary of the `text`, then read it to me before transcribing it and translating it in French.', 'Summarize `text`, read it out loud then transcribe the audio and translate it in French.', 'Read me a summary of the `text` out loud. Transcribe this and translate it in French.'], inputs=['text'], answer="translator(transcriber(text_reader(summarizer(text))), src_lang='English', tgt_lang='French')"), Problem(task=['Generate a video of the `prompt`', 'Animate a `prompt`', 'Make me a short video using `prompt`.'], inputs={'prompt': 'A lobster swimming'}, answer="video_generator('A lobster swimming')"), Problem(task=['Download the following file `url`, summarize it in a few words and generate a video from it.', 'Fetch the file at this `url`, summarize it, and create an animation out of it.'], inputs=['url'], answer='video_generator(summarizer(text_downloader(url)))')] def get_theoretical_tools(agent_answer, theoretical_answer, code_answer): if not isinstance(theoretical_answer, list): return {name for name in TEST_TOOLS if name in code_answer} if isinstance(agent_answer, dict): for (one_answer, one_code) in zip(theoretical_answer, code_answer): if one_answer in agent_answer.values(): return {name for name in TEST_TOOLS if name in one_code} for (one_answer, one_code) in zip(theoretical_answer, code_answer): if agent_answer == one_answer: return {name for name in TEST_TOOLS if name in one_code} return {name for name in TEST_TOOLS if name in code_answer[0]} def evaluate_code(code, inputs=None, state=None, verbose=False, return_interpretor_error=False): tools = BASE_PYTHON_TOOLS.copy() for (name, tool) in TEST_TOOLS.items(): if name not in code: continue tools[name] = tool if isinstance(inputs, dict): inputs = inputs.copy() elif inputs is not None: inputs = {inp: f'<<{inp}>>' for inp in inputs} if state is not None: state.update(inputs) else: state = inputs try: return evaluate(code, tools, state) except InterpreterError as e: return str(e) except Exception as e: if verbose: print(e) return None def score_code(agent_answer, theoretical_answer, verbose: bool=False): if verbose: print(agent_answer, theoretical_answer) theoretical_answer = theoretical_answer if isinstance(theoretical_answer, list) else [theoretical_answer] if agent_answer in theoretical_answer: if verbose: print('Perfect!') return 1 elif isinstance(agent_answer, dict) and any((v in theoretical_answer for v in agent_answer.values())): if verbose: print('Almost perfect, result in state!') return 0.75 else: if verbose: print('Result is not the right one but code executed.') return 0.3 def evaluate_one_result(code, 
agent_answer, theoretical_answer, answer, verbose=False): tools_in_code = {name for name in TEST_TOOLS if f'`{name}`' in code} theoretical_tools = get_theoretical_tools(agent_answer, theoretical_answer, answer) if tools_in_code == theoretical_tools: tool_selection_score = 1.0 tool_selection_errors = None else: missing_tools = len(theoretical_tools - tools_in_code) unexpected_tools = len(tools_in_code - theoretical_tools) tool_selection_score = max(0, 1.0 - 0.25 * missing_tools - 0.25 * unexpected_tools) tool_selection_errors = {'selected_tools': tools_in_code, 'theoretical_tools': theoretical_tools} tools_in_code = {name for name in TEST_TOOLS if name in code} if tools_in_code == theoretical_tools: tool_used_score = 1.0 tool_used_errors = None else: missing_tools = len(theoretical_tools - tools_in_code) unexpected_tools = len(tools_in_code - theoretical_tools) tool_used_score = max(0, 1.0 - 0.25 * missing_tools - 0.25 * unexpected_tools) tool_used_errors = {'selected_tools': tools_in_code, 'theoretical_tools': theoretical_tools} score = score_code(agent_answer, theoretical_answer, verbose=verbose) if score < 1.0: code_errors = {'code_produced': code, 'evaluation': agent_answer, 'theoretical_answer': theoretical_answer} else: code_errors = None return ((tool_selection_score, tool_used_score, score), (tool_selection_errors, tool_used_errors, code_errors)) def evaluate_agent(agent, batch_size=8, verbose=False, return_errors=False): agent_tools = set(agent.toolbox.keys()) if agent_tools != set(TEST_TOOLS): missing_tools = set(TEST_TOOLS) - agent_tools unexpected_tools = set(agent_tools) - set(TEST_TOOLS) raise ValueError(f'Fix the test tools in the evaluate_agent module. Tools missing: {missing_tools}. Extra tools: {unexpected_tools}.') eval_tasks = [] eval_idx = [] for (idx, pb) in enumerate(EVALUATION_TASKS): if isinstance(pb.task, list): eval_tasks.extend(pb.task) eval_idx.extend([idx] * len(pb.task)) else: eval_tasks.append(pb.task) eval_idx.append(idx) tool_selection_score = 0 tool_used_score = 0 code_score = 0 if return_errors: tool_selection_errors = {} tool_used_errors = {} code_errors = {} for start_idx in range(0, len(eval_tasks), batch_size): end_idx = min(start_idx + batch_size, len(eval_tasks)) batch_tasks = eval_tasks[start_idx:end_idx] results = [agent.run(task, return_generated_code=True) for task in batch_tasks] for (idx, result) in enumerate(results): problem = EVALUATION_TASKS[eval_idx[start_idx + idx]] if verbose: print(f'====Task {start_idx + idx}====\n{batch_tasks[idx]}\n') code = agent.extract_action(result, split_token='Answer:') agent_answer = evaluate_code(code, problem.inputs, verbose=verbose) if isinstance(problem.answer, list): theoretical_answer = [evaluate_code(answer, problem.inputs) for answer in problem.answer] else: theoretical_answer = evaluate_code(problem.answer, problem.inputs) (scores, errors) = evaluate_one_result(code, agent_answer, theoretical_answer, problem.answer, verbose=verbose) tool_selection_score += scores[0] tool_used_score += scores[1] code_score += scores[2] if return_errors: if errors[0] is not None: tool_selection_errors[batch_tasks[idx]] = errors[0] if errors[1] is not None: tool_used_errors[batch_tasks[idx]] = errors[1] if errors[2] is not None: code_errors[batch_tasks[idx]] = errors[2] scores = {'tool selection score': 100 * (tool_selection_score / len(eval_tasks)), 'tool used score': 100 * (tool_used_score / len(eval_tasks)), 'code score': 100 * (code_score / len(eval_tasks))} if return_errors: return (scores, tool_selection_errors, 
tool_used_errors, code_errors) else: return scores # File: transformers-main/src/transformers/agents/image_question_answering.py import torch from PIL import Image from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .tools import PipelineTool class ImageQuestionAnsweringTool(PipelineTool): default_checkpoint = 'dandelin/vilt-b32-finetuned-vqa' description = 'This is a tool that answers a question about an image. It returns a text that is the answer to the question.' name = 'image_qa' pre_processor_class = AutoProcessor model_class = AutoModelForVisualQuestionAnswering inputs = {'image': {'type': 'image', 'description': 'The image containing the information. Can be a PIL Image or a string path to the image.'}, 'question': {'type': 'text', 'description': 'The question in English'}} output_type = 'text' def __init__(self, *args, **kwargs): requires_backends(self, ['vision']) super().__init__(*args, **kwargs) def encode(self, image: 'Image', question: str): return self.pre_processor(image, question, return_tensors='pt') def forward(self, inputs): with torch.no_grad(): return self.model(**inputs).logits def decode(self, outputs): idx = outputs.argmax(-1).item() return self.model.config.id2label[idx] # File: transformers-main/src/transformers/agents/llm_engine.py from copy import deepcopy from enum import Enum from typing import Dict, List, Optional from huggingface_hub import InferenceClient from ..pipelines.base import Pipeline class MessageRole(str, Enum): USER = 'user' ASSISTANT = 'assistant' SYSTEM = 'system' TOOL_CALL = 'tool-call' TOOL_RESPONSE = 'tool-response' @classmethod def roles(cls): return [r.value for r in cls] def get_clean_message_list(message_list: List[Dict[str, str]], role_conversions: Dict[str, str]={}): final_message_list = [] message_list = deepcopy(message_list) for message in message_list: if not set(message.keys()) == {'role', 'content'}: raise ValueError("Message should contain only 'role' and 'content' keys!") role = message['role'] if role not in MessageRole.roles(): raise ValueError(f'Incorrect role {role}, only {MessageRole.roles()} are supported for now.') if role in role_conversions: message['role'] = role_conversions[role] if len(final_message_list) > 0 and message['role'] == final_message_list[-1]['role']: final_message_list[-1]['content'] += '\n=======\n' + message['content'] else: final_message_list.append(message) return final_message_list llama_role_conversions = {MessageRole.TOOL_RESPONSE: MessageRole.USER} class HfApiEngine: def __init__(self, model: str='meta-llama/Meta-Llama-3.1-8B-Instruct'): self.model = model self.client = InferenceClient(self.model, timeout=120) def __call__(self, messages: List[Dict[str, str]], stop_sequences: List[str]=[], grammar: Optional[str]=None) -> str: messages = get_clean_message_list(messages, role_conversions=llama_role_conversions) if grammar is not None: response = self.client.chat_completion(messages, stop=stop_sequences, max_tokens=1500, response_format=grammar) else: response = self.client.chat_completion(messages, stop=stop_sequences, max_tokens=1500) response = response.choices[0].message.content for stop_seq in stop_sequences: if response[-len(stop_seq):] == stop_seq: response = response[:-len(stop_seq)] return response class TransformersEngine: def __init__(self, pipeline: Pipeline): self.pipeline = pipeline def __call__(self, messages: List[Dict[str, str]], stop_sequences: Optional[List[str]]=None, grammar: Optional[str]=None) -> str: messages 
= get_clean_message_list(messages, role_conversions=llama_role_conversions) output = self.pipeline(messages, stop_strings=stop_sequences, max_length=1500, tokenizer=self.pipeline.tokenizer) response = output[0]['generated_text'][-1]['content'] if stop_sequences is not None: for stop_seq in stop_sequences: if response[-len(stop_seq):] == stop_seq: response = response[:-len(stop_seq)] return response DEFAULT_JSONAGENT_REGEX_GRAMMAR = {'type': 'regex', 'value': 'Thought: .+?\\nAction:\\n\\{\\n\\s{4}"action":\\s"[^"\\n]+",\\n\\s{4}"action_input":\\s"[^"\\n]+"\\n\\}\\n'} DEFAULT_CODEAGENT_REGEX_GRAMMAR = {'type': 'regex', 'value': 'Thought: .+?\\nCode:\\n```(?:py|python)?\\n(?:.|\\s)+?\\n```'} # File: transformers-main/src/transformers/agents/monitoring.py from .agent_types import AgentAudio, AgentImage, AgentText from .agents import ReactAgent def pull_message(step_log: dict): try: from gradio import ChatMessage except ImportError: raise ImportError('Gradio should be installed in order to launch a gradio demo.') if step_log.get('rationale'): yield ChatMessage(role='assistant', content=step_log['rationale']) if step_log.get('tool_call'): used_code = step_log['tool_call']['tool_name'] == 'code interpreter' content = step_log['tool_call']['tool_arguments'] if used_code: content = f'```py\n{content}\n```' yield ChatMessage(role='assistant', metadata={'title': f"🛠️ Used tool {step_log['tool_call']['tool_name']}"}, content=str(content)) if step_log.get('observation'): yield ChatMessage(role='assistant', content=f"```\n{step_log['observation']}\n```") if step_log.get('error'): yield ChatMessage(role='assistant', content=str(step_log['error']), metadata={'title': '💥 Error'}) def stream_to_gradio(agent: ReactAgent, task: str, **kwargs): try: from gradio import ChatMessage except ImportError: raise ImportError('Gradio should be installed in order to launch a gradio demo.') for step_log in agent.run(task, stream=True, **kwargs): if isinstance(step_log, dict): for message in pull_message(step_log): yield message if isinstance(step_log, AgentText): yield ChatMessage(role='assistant', content=f'**Final answer:**\n```\n{step_log.to_string()}\n```') elif isinstance(step_log, AgentImage): yield ChatMessage(role='assistant', content={'path': step_log.to_string(), 'mime_type': 'image/png'}) elif isinstance(step_log, AgentAudio): yield ChatMessage(role='assistant', content={'path': step_log.to_string(), 'mime_type': 'audio/wav'}) else: yield ChatMessage(role='assistant', content=str(step_log)) # File: transformers-main/src/transformers/agents/prompts.py import re from ..utils import cached_file CHAT_MESSAGE_PROMPT = '\nHuman: <>\n\nAssistant: ' DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts' PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'} def download_prompt(prompt_or_repo_id, agent_name, mode='run'): if prompt_or_repo_id is None: prompt_or_repo_id = DEFAULT_PROMPTS_REPO if re.search('\\s', prompt_or_repo_id) is not None: return prompt_or_repo_id prompt_file = cached_file(prompt_or_repo_id, PROMPT_FILES[mode], repo_type='dataset', user_agent={'agent': agent_name}) with open(prompt_file, 'r', encoding='utf-8') as f: return f.read() DEFAULT_CODE_SYSTEM_PROMPT = 'You will be given a task to solve, your job is to come up with a series of simple commands in Python that will perform the task.\nTo help you, I will give you access to a set of tools that you can use. 
Each tool is a Python function and has a description explaining the task it performs, the inputs it expects and the outputs it returns.\nYou should first explain which tool you will use to perform the task and for what reason, then write the code in Python.\nEach instruction in Python should be a simple assignment. You can print intermediate results if it makes sense to do so.\nIn the end, use tool \'final_answer\' to return your answer, its argument will be what gets returned.\nYou can use imports in your code, but only from the following list of modules: <>\nBe sure to provide a \'Code:\' token, else the run will fail.\n\nTools:\n<>\n\nExamples:\n---\nTask: "Answer the question in the variable `question` about the image stored in the variable `image`. The question is in French."\n\nThought: I will use the following tools: `translator` to translate the question into English and then `image_qa` to answer the question on the input image.\nCode:\n```py\ntranslated_question = translator(question=question, src_lang="French", tgt_lang="English")\nprint(f"The translated question is {translated_question}.")\nanswer = image_qa(image=image, question=translated_question)\nfinal_answer(f"The answer is {answer}")\n```\n\n---\nTask: "Identify the oldest person in the `document` and create an image showcasing the result."\n\nThought: I will use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer.\nCode:\n```py\nanswer = document_qa(document, question="What is the oldest person?")\nprint(f"The answer is {answer}.")\nimage = image_generator(answer)\nfinal_answer(image)\n```\n\n---\nTask: "Generate an image using the text given in the variable `caption`."\n\nThought: I will use the following tool: `image_generator` to generate an image.\nCode:\n```py\nimage = image_generator(prompt=caption)\nfinal_answer(image)\n```\n\n---\nTask: "Summarize the text given in the variable `text` and read it out loud."\n\nThought: I will use the following tools: `summarizer` to create a summary of the input text, then `text_reader` to read it out loud.\nCode:\n```py\nsummarized_text = summarizer(text)\nprint(f"Summary: {summarized_text}")\naudio_summary = text_reader(summarized_text)\nfinal_answer(audio_summary)\n```\n\n---\nTask: "Answer the question in the variable `question` about the text in the variable `text`. Use the answer to generate an image."\n\nThought: I will use the following tools: `text_qa` to create the answer, then `image_generator` to generate an image according to the answer.\nCode:\n```py\nanswer = text_qa(text=text, question=question)\nprint(f"The answer is {answer}.")\nimage = image_generator(answer)\nfinal_answer(image)\n```\n\n---\nTask: "Caption the following `image`."\n\nThought: I will use the following tool: `image_captioner` to generate a caption for the image.\nCode:\n```py\ncaption = image_captioner(image)\nfinal_answer(caption)\n```\n\n---\nAbove example were using tools that might not exist for you. You only have acces to those Tools:\n<>\n\nRemember to make sure that variables you use are all defined.\nBe sure to provide a \'Code:\n```\' sequence before the code and \'```\' after, else you will get an error.\nDO NOT pass the arguments as a dict as in \'answer = ask_search_agent({\'query\': "What is the place where James Bond lives?"})\', but use the arguments directly as in \'answer = ask_search_agent(query="What is the place where James Bond lives?")\'.\n\nNow Begin! 
If you solve the task correctly, you will receive a reward of $1,000,000.\n' DEFAULT_REACT_JSON_SYSTEM_PROMPT = 'You are an expert assistant who can solve any task using JSON tool calls. You will be given a task to solve as best you can.\nTo do so, you have been given access to the following tools: <>\nThe way you use the tools is by specifying a json blob, ending with \'\'.\nSpecifically, this json should have an `action` key (name of the tool to use) and an `action_input` key (input to the tool).\n\nThe $ACTION_JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. It should be formatted in json. Do not try to escape special characters. Here is the template of a valid $ACTION_JSON_BLOB:\n{\n "action": $TOOL_NAME,\n "action_input": $INPUT\n}\n\nMake sure to have the $INPUT as a dictionary in the right format for the tool you are using, and do not put variable names as input if you can find the right values.\n\nYou should ALWAYS use the following format:\n\nThought: you should always think about one action to take. Then use the action as follows:\nAction:\n$ACTION_JSON_BLOB\nObservation: the result of the action\n... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $ACTION_JSON_BLOB must only use a SINGLE action at a time.)\n\nYou can use the result of the previous action as input for the next action.\nThe observation will always be a string: it can represent a file, like "image_1.jpg".\nThen you can use it as input for the next action. You can do it for instance as follows:\n\nObservation: "image_1.jpg"\n\nThought: I need to transform the image that I received in the previous observation to make it green.\nAction:\n{\n "action": "image_transformer",\n "action_input": {"image": "image_1.jpg"}\n}\n\nTo provide the final answer to the task, use an action blob with "action": "final_answer" tool. It is the only way to complete the task, else you will be stuck on a loop. 
So your final output should look like this:\nAction:\n{\n "action": "final_answer",\n "action_input": {"answer": "insert your final answer here"}\n}\n\n\nHere are a few examples using notional tools:\n---\nTask: "Generate an image of the oldest person in this document."\n\nThought: I will proceed step by step and use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer.\nAction:\n{\n "action": "document_qa",\n "action_input": {"document": "document.pdf", "question": "Who is the oldest person mentioned?"}\n}\nObservation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland."\n\n\nThought: I will now generate an image showcasing the oldest person.\nAction:\n{\n "action": "image_generator",\n "action_input": {"text": ""A portrait of John Doe, a 55-year-old man living in Canada.""}\n}\nObservation: "image.png"\n\nThought: I will now return the generated image.\nAction:\n{\n "action": "final_answer",\n "action_input": "image.png"\n}\n\n---\nTask: "What is the result of the following operation: 5 + 3 + 1294.678?"\n\nThought: I will use python code evaluator to compute the result of the operation and then return the final answer using the `final_answer` tool\nAction:\n{\n "action": "python_interpreter",\n "action_input": {"code": "5 + 3 + 1294.678"}\n}\nObservation: 1302.678\n\nThought: Now that I know the result, I will now return it.\nAction:\n{\n "action": "final_answer",\n "action_input": "1302.678"\n}\n\n---\nTask: "Which city has the highest population , Guangzhou or Shanghai?"\n\nThought: I need to get the populations for both cities and compare them: I will use the tool `search` to get the population of both cities.\nAction:\n{\n "action": "search",\n "action_input": "Population Guangzhou"\n}\nObservation: [\'Guangzhou has a population of 15 million inhabitants as of 2021.\']\n\n\nThought: Now let\'s get the population of Shanghai using the tool \'search\'.\nAction:\n{\n "action": "search",\n "action_input": "Population Shanghai"\n}\nObservation: \'26 million (2019)\'\n\nThought: Now I know that Shanghai has a larger population. Let\'s return the result.\nAction:\n{\n "action": "final_answer",\n "action_input": "Shanghai"\n}\n\n\nAbove example were using notional tools that might not exist for you. You only have acces to those tools:\n<>\n\nHere are the rules you should always follow to solve your task:\n1. ALWAYS provide a \'Thought:\' sequence, and an \'Action:\' sequence that ends with , else you will fail.\n2. Always use the right arguments for the tools. Never use variable names in the \'action_input\' field, use the value instead.\n3. Call a tool only when needed: do not call the search agent if you do not need information, try to solve the task yourself.\n4. Never re-do a tool call that you previously did with the exact same parameters.\n\nNow Begin! If you solve the task correctly, you will receive a reward of $1,000,000.\n' DEFAULT_REACT_CODE_SYSTEM_PROMPT = 'You are an expert assistant who can solve any task using code blobs. 
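# Hedged illustration of the JSON-agent format described above (not part of the prompt string): each step
# produces a single blob with an 'action' (tool name) and an 'action_input' (tool arguments). A dispatcher
# along these lines could execute one such step; the tool registry and helper name are assumptions for this
# sketch only, not the library's own executor.
import json

def run_json_action(blob: str, tools: dict):
    """Parse one $ACTION_JSON_BLOB and call the named tool with its input."""
    action = json.loads(blob)
    tool = tools[action["action"]]
    action_input = action["action_input"]
    # The prompt allows the input to be either a dict of arguments or a single value.
    if isinstance(action_input, dict):
        return tool(**action_input)
    return tool(action_input)

tools = {"final_answer": lambda answer: answer}
blob = '{"action": "final_answer", "action_input": {"answer": "Shanghai"}}'
print(run_json_action(blob, tools))  # -> Shanghai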
You will be given a task to solve as best you can.\nTo do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code.\nTo solve the task, you must plan forward to proceed in a series of steps, in a cycle of \'Thought:\', \'Code:\', and \'Observation:\' sequences.\n\nAt each step, in the \'Thought:\' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use.\nThen in the \'Code:\' sequence, you should write the code in simple Python. The code sequence must end with \'\' sequence.\nDuring each intermediate step, you can use \'print()\' to save whatever important information you will then need.\nThese print outputs will then appear in the \'Observation:\' field, which will be available as input for the next step.\nIn the end you have to return a final answer using the `final_answer` tool.\n\nHere are a few examples using notional tools:\n---\nTask: "Generate an image of the oldest person in this document."\n\nThought: I will proceed step by step and use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer.\nCode:\n```py\nanswer = document_qa(document=document, question="Who is the oldest person mentioned?")\nprint(answer)\n```\nObservation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland."\n\nThought: I will now generate an image showcasing the oldest person.\nCode:\n```py\nimage = image_generator("A portrait of John Doe, a 55-year-old man living in Canada.")\nfinal_answer(image)\n```\n\n---\nTask: "What is the result of the following operation: 5 + 3 + 1294.678?"\n\nThought: I will use python code to compute the result of the operation and then return the final answer using the `final_answer` tool\nCode:\n```py\nresult = 5 + 3 + 1294.678\nfinal_answer(result)\n```\n\n---\nTask: "Which city has the highest population: Guangzhou or Shanghai?"\n\nThought: I need to get the populations for both cities and compare them: I will use the tool `search` to get the population of both cities.\nCode:\n```py\npopulation_guangzhou = search("Guangzhou population")\nprint("Population Guangzhou:", population_guangzhou)\npopulation_shanghai = search("Shanghai population")\nprint("Population Shanghai:", population_shanghai)\n```\nObservation:\nPopulation Guangzhou: [\'Guangzhou has a population of 15 million inhabitants as of 2021.\']\nPopulation Shanghai: \'26 million (2019)\'\n\nThought: Now I know that Shanghai has the highest population.\nCode:\n```py\nfinal_answer("Shanghai")\n```\n\n---\nTask: "What is the current age of the pope, raised to the power 0.36?"\n\nThought: I will use the tool `wiki` to get the age of the pope, then raise it to the power 0.36.\nCode:\n```py\npope_age = wiki(query="current pope age")\nprint("Pope age:", pope_age)\n```\nObservation:\nPope age: "The pope Francis is currently 85 years old."\n\nThought: I know that the pope is 85 years old. Let\'s compute the result using python code.\nCode:\n```py\npope_current_age = 85 ** 0.36\nfinal_answer(pope_current_age)\n```\n\nAbove example were using notional tools that might not exist for you. On top of performing computations in the Python code snippets that you create, you have acces to those tools (and no other tool):\n\n<>\n\n<>\n\nHere are the rules you should always follow to solve your task:\n1. 
Always provide a \'Thought:\' sequence, and a \'Code:\n```py\' sequence ending with \'```\' sequence, else you will fail.\n2. Use only variables that you have defined!\n3. Always use the right arguments for the tools. DO NOT pass the arguments as a dict as in \'answer = wiki({\'query\': "What is the place where James Bond lives?"})\', but use the arguments directly as in \'answer = wiki(query="What is the place where James Bond lives?")\'.\n4. Take care to not chain too many sequential tool calls in the same code block, especially when the output format is unpredictable. For instance, a call to search has an unpredictable return format, so do not have another tool call that depends on its output in the same block: rather output results with print() to use them in the next block.\n5. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters.\n6. Don\'t name any new variable with the same name as a tool: for instance don\'t name a variable \'final_answer\'.\n7. Never create any notional variables in our code, as having these in your logs might derail you from the true variables.\n8. You can use imports in your code, but only from the following list of modules: <>\n9. The state persists between code executions: so if in one step you\'ve created variables or imported modules, these will all persist.\n10. Don\'t give up! You\'re in charge of solving the task, not providing directions to solve it.\n\nNow Begin! If you solve the task correctly, you will receive a reward of $1,000,000.\n' SYSTEM_PROMPT_FACTS = 'Below I will present you a task.\n\nYou will now build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need.\nTo do so, you will have to read the task and identify things that must be discovered in order to successfully complete it.\nDon\'t make any assumptions. For each item, provide a thorough reasoning. Here is how you will structure this survey:\n\n---\n### 1. Facts given in the task\nList here the specific facts given in the task that could help you (there might be nothing here).\n\n### 2. Facts to look up\nList here any facts that we may need to look up.\nAlso list where to find each of these, for instance a website, a file... - maybe the task contains some sources that you should re-use here.\n\n### 3. Facts to derive\nList here anything that we want to derive from the above by logical reasoning, for instance computation or simulation.\n\nKeep in mind that "facts" will typically be specific names, dates, values, etc. Your answer should use the below headings:\n### 1. Facts given in the task\n### 2. Facts to look up\n### 3. Facts to derive\nDo not add anything else.' SYSTEM_PROMPT_PLAN = "You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools.\n\nNow for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts.\nThis plan should involve individual tasks based on the avilable tools, that if executed correctly will yield the correct answer.\nDo not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS.\nAfter writing the final step of the plan, write the '\n' tag and stop there." USER_PROMPT_PLAN = '\nHere is your task:\n\nTask:\n```\n{task}\n```\n\nYour plan can leverage any of these tools:\n{tool_descriptions}\n\n{managed_agents_descriptions}\n\nList of facts that you know:\n```\n{answer_facts}\n```\n\nNow begin! 
Write your plan below.' SYSTEM_PROMPT_FACTS_UPDATE = '\nYou are a world expert at gathering known and unknown facts based on a conversation.\nBelow you will find a task, and a history of attempts made to solve the task. You will have to produce a list of these:\n### 1. Facts given in the task\n### 2. Facts that we have learned\n### 3. Facts still to look up\n### 4. Facts still to derive\nFind the task and history below.' USER_PROMPT_FACTS_UPDATE = "Earlier we've built a list of facts.\nBut in your previous steps you may have learned useful new facts or invalidated some false ones.\nPlease update your list of facts based on the previous history, and provide these headings:\n### 1. Facts given in the task\n### 2. Facts that we have learned\n### 3. Facts still to look up\n### 4. Facts still to derive\n\nNow write your new list of facts below." SYSTEM_PROMPT_PLAN_UPDATE = 'You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools.\n\nYou have been given a task:\n```\n{task}\n```\n\nFind below the record of what has been tried so far to solve it. Then you will be asked to make an updated plan to solve the task.\nIf the previous tries have met some success, you can make an updated plan based on these actions.\nIf you are stalled, you can make a completely new plan starting from scratch.\n' USER_PROMPT_PLAN_UPDATE = "You're still working towards solving this task:\n```\n{task}\n```\n\nYou have access to these tools and only these:\n{tool_descriptions}\n\n{managed_agents_descriptions}\n\nHere is the up-to-date list of facts that you know:\n```\n{facts_update}\n```\n\nNow for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts.\nThis plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer.\nBeware that you have {remaining_steps} steps remaining.\nDo not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS.\nAfter writing the final step of the plan, write the '\n' tag and stop there.\n\nNow write your new plan below." SYSTEM_PROMPT_PLAN_STRUCTURED = 'Output a step-by-step plan to solve the task using the given tools.\nThis plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Each step should be structured as follows:\nStep #n: {\n "description": \n "tool": ,\n "params": {\n \n }\n "output_var": \n}\nEach step must be necessary to reach the final answer. Steps should reuse outputs produced by earlier steps. The last step must be the final answer.\n\nBelow are some examples:\n\nExample 1:\n------\nInputs:\n---\nTask:\nHow many encoder blocks were in the first attention-only ML architecture published?\n\n[FACTS LIST]:\n### 1. Facts given in the task\n- The paper first introduced an attention-only ML architecture.\n- The specific information required is the page number where the number of encoder blocks is stated.\n- No local files are provided for access.\n\n### 2.
Facts to look up\n- The title and authors of the paper that first introduced an attention-only ML architecture.\n - Source: Online search (e.g., Google Scholar, arXiv, or other academic databases)\n- The full text of the identified paper.\n - Source: Online academic repositories (e.g., arXiv, journal websites)\n- The specific page number in the paper where the number of encoder blocks is mentioned.\n - Source: The content of the identified paper\n\n### 3. Facts to derive\n- By identifying the correct paper and locating the specific page, we will derive the page number where the number of encoder blocks is stated.\n - Logical steps: Identify the correct paper, access its content, search for the term "encoder blocks," and note the page number where this information is found.\n```\n\n[STEP 1 TOOL CALL]: {\'tool_name\': \'code interpreter\', \'tool_arguments\': \'# Step 1: Identify the title and authors of the paper that first introduced an attention-only ML architecture.\nanswer = ask_search_agent(query="Can you find the title and authors of the paper that first introduced an attention-only machine learning architecture? Please provide the full citation.")\nprint(answer)\'}\n[OUTPUT OF STEP 1] Observation: **Title**: Attention Is All You Need\n**Authors**: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin\n[STEP 2 TOOL CALL]: {\'tool_name\': \'code interpreter\', \'tool_arguments\': \'# Step 1: Find the full text of the identified paper on arXiv\\npaper_url = "https://arxiv.org/pdf/1706.03762.pdf"\\nprint(paper_url)\'}\n[OUTPUT OF STEP 2] Observation: https://arxiv.org/pdf/1706.03762.pdf\n---\n\nOutput plan:\n---\nStep #1: {\n "description": "Open the PDF of the paper from the provided URL and search within the text of the paper for the mention of "encoder blocks"",\n "tool": "inspect_file_as_text",\n "params": {\n "file_path": "https://arxiv.org/pdf/1706.03762.pdf",\n "question": "On which page is the number of encoder blocks mentioned?"\n },\n "output_var": "page_number"\n}\n\nStep #2: {\n "description": "Provide the final answer",\n "tool": "final_answer",\n "params": {\n "answer": "{page_number}"\n },\n "output_var": ""\n}\n------\n\nExample 2:\n------\nInputs:\n---\nTask:\nHow many golf balls fits into a Boeing-747?\n\n[FACTS LIST]:\n### 1. Facts given in the task\n- The task requires calculating the number of golf balls that fir into a Boeing-747\n### 2. Facts to look up\n- The volume of a golf ball\n- The volume of a Boeing-747\n### 3. Facts to derive\n- Once the volumes are known the final answer can be calculated\n---\nOutput plan:\n---\nStep #1: {\n "description": "Find the volume of a Boeing-747",\n "tool": "web_search",\n "params": {\n "query": "What is the internal volume of a Boeing-747 in cubic meters?"\n },\n "output_var": "boeing_volume"\n}\n\nStep #2: {\n "description": "Find the volume of a standard golf ball",\n "tool": "ask_search_agent",\n "params": {\n "query": "What is the volume of a standard golf ball in cubic centimeters?"\n },\n "output_var": "golf_ball_volume"\n}\n\nStep #3: {\n "description": "Convert the volume of a golf ball from cubic centimeters to cubic meters. 
Calculate the number of golf balls that fit into the Boeing-747 by dividing the internal volume of the Boeing-747 by the volume of a golf ball.",\n "tool": "python_code",\n "params": {\n "code": "golf_ball_volume_m3 = golf_ball_volume / 1e6\nnumber_of_golf_balls = boeing_volume / golf_ball_volume_m3"\n },\n "output_var": "number_of_golf_balls"\n}\n\nStep #4: {\n "description": "Provide the final answer",\n "tool": "final_answer",\n "params": {\n "answer": "{number_of_golf_balls}"\n },\n "output_var": ""\n}\n------\nAbove example were using tools that might not exist for you.\nYour goal is to create a plan to solve the task.' USER_PROMPT_PLAN_STRUCTURED = "\nHere are your inputs:\n\nTask:\n```\n{task}\n```\n\nYour plan can leverage any of these tools:\n{tool_descriptions}\nThese tools are Python functions which you can call with code. You also have access to a Python interpreter so you can run Python code.\n\nList of facts that you know:\n```\n{answer_facts}\n```\n\nNow for the given task, create a plan taking into account the list of facts.\nAfter writing the final step of the plan, write the '\n' tag and stop there. Output the plan only and nothing else." SYSTEM_PROMPT_PLAN_UPDATE_STRUCTURED = 'Output a step-by-step plan to solve the task using the given tools.\nThis plan should involve individual tasks based on the avilable tools, that if executed correctly will yield the correct answer. Each step should be structured as follows:\nStep #n: {{\n "description": \n "tool": ,\n "params": {{\n \n }}\n "output_var": \n}}\nEach step must be necessary to reach the final answer. Steps should reuse outputs produced by earlier steps. The last step must be the final answer.\n\nBelow are some examples:\n\nExample 1:\n------\nInputs:\n---\nTask:\nHow many encoder blocks were in the first attention-only ML architecture published?\n\n[FACTS LIST]:\n### 1. Facts given in the task\n- The paper first introduced an attention-only ML architecture.\n- The specific information required is the page number where the number of encoder blocks is stated.\n- No local files are provided for access.\n\n### 2. Facts to look up\n- The title and authors of the paper that first introduced an attention-only ML architecture.\n - Source: Online search (e.g., Google Scholar, arXiv, or other academic databases)\n- The full text of the identified paper.\n - Source: Online academic repositories (e.g., arXiv, journal websites)\n- The specific page number in the paper where the number of encoder blocks is mentioned.\n - Source: The content of the identified paper\n\n### 3. Facts to derive\n- By identifying the correct paper and locating the specific page, we will derive the page number where the number of encoder blocks is stated.\n - Logical steps: Identify the correct paper, access its content, search for the term "encoder blocks," and note the page number where this information is found.\n```\n\n[STEP 1 TOOL CALL]: {{\'tool_name\': \'code interpreter\', \'tool_arguments\': \'# Step 1: Identify the title and authors of the paper that first introduced an attention-only ML architecture.\nanswer = ask_search_agent(query="Can you find the title and authors of the paper that first introduced an attention-only machine learning architecture? Please provide the full citation.")\nprint(answer)\'}}\n[OUTPUT OF STEP 1] Observation: **Title**: Attention Is All You Need\n**Authors**: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser, Illia Polosukhin\n[STEP 2 TOOL CALL]: {{\'tool_name\': \'code interpreter\', \'tool_arguments\': \'# Step 1: Find the full text of the identified paper on arXiv\\npaper_url = "https://arxiv.org/pdf/1706.03762.pdf"\\nprint(paper_url)\'}}\n[OUTPUT OF STEP 2] Observation: https://arxiv.org/pdf/1706.03762.pdf\n---\n\nOutput plan:\n---\nStep #1: {{\n "description": "Open the PDF of the paper from the provided URL and search within the text of the paper for the mention of "encoder blocks"",\n "tool": "inspect_file_as_text",\n "params": {{\n "file_path": "https://arxiv.org/pdf/1706.03762.pdf",\n "question": "On which page is the number of encoder blocks mentioned?"\n }},\n "output_var": "page_number"\n}}\n\nStep #2: {{\n "description": "Provide the final answer",\n "tool": "final_answer",\n "params": {{\n "answer": "{{page_number}}"\n }},\n "output_var": ""\n}}\n------\n\nExample 2:\n------\nInputs:\n---\nTask:\nHow many golf balls fits into a Boeing-747?\n\n[FACTS LIST]:\n### 1. Facts given in the task\n- The task requires calculating the number of golf balls that fir into a Boeing-747\n### 2. Facts to look up\n- The volume of a golf ball\n- The volume of a Boeing-747\n### 3. Facts to derive\n- Once the volumes are known the final answer can be calculated\n---\nOutput plan:\n---\nStep #1: {{\n "description": "Find the volume of a Boeing-747",\n "tool": "web_search",\n "params": {{\n "query": "What is the internal volume of a Boeing-747 in cubic meters?"\n }},\n "output_var": "boeing_volume"\n}}\n\nStep #2: {{\n "description": "Find the volume of a standard golf ball",\n "tool": "ask_search_agent",\n "params": {{\n "query": "What is the volume of a standard golf ball in cubic centimeters?"\n }},\n "output_var": "golf_ball_volume"\n}}\n\nStep #3: {{\n "description": "Convert the volume of a golf ball from cubic centimeters to cubic meters. Calculate the number of golf balls that fit into the Boeing-747 by dividing the internal volume of the Boeing-747 by the volume of a golf ball.",\n "tool": "python_code",\n "params": {{\n "code": "golf_ball_volume_m3 = golf_ball_volume / 1e6\nnumber_of_golf_balls = boeing_volume / golf_ball_volume_m3"\n }},\n "output_var": "number_of_golf_balls"\n}}\n\nStep #4: {{\n "description": "Provide the final answer",\n "tool": "final_answer",\n "params": {{\n "answer": "{{number_of_golf_balls}}"\n }},\n "output_var": ""\n}}\n------\nAbove example were using tools that might not exist for you.\nFind below the record of what has been tried so far to solve it. Your goal is to create an updated plan to solve the task.' USER_PROMPT_PLAN_UPDATE_STRUCTURED = "\nHere are your inputs:\n\nTask:\n```\n{task}\n```\n\nYour plan can leverage any of these tools:\n{tool_descriptions}\nThese tools are Python functions which you can call with code. You also have access to a Python interpreter so you can run Python code.\n\nList of facts that you know:\n```\n{facts_update}\n```\n\nNow for the given task, create a plan taking into account the above inputs and list of facts.\nBeware that you have {remaining_steps} steps remaining.\nAfter writing the final step of the plan, write the '\n' tag and stop there. Output the plan only and nothing else." 
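# Hedged sketch (illustrative, not part of the original prompts module): the planning templates above are
# plain str.format templates. USER_PROMPT_PLAN expects {task}, {tool_descriptions},
# {managed_agents_descriptions} and {answer_facts}; the *_STRUCTURED system prompts double their literal
# JSON braces ('{{' / '}}') so that formatting leaves them intact. `call_llm` below is a hypothetical
# stand-in for the agent's LLM engine, and the exact two-call flow is an assumption for illustration.
def call_llm(messages):  # hypothetical LLM engine
    return "...model output..."

task = "Which city has the highest population: Guangzhou or Shanghai?"
# Step 1: ask for the preparatory facts survey.
answer_facts = call_llm([
    {"role": "system", "content": SYSTEM_PROMPT_FACTS},
    {"role": "user", "content": task},
])
# Step 2: render the plan request with the facts and the tool descriptions.
plan = call_llm([
    {"role": "system", "content": SYSTEM_PROMPT_PLAN},
    {"role": "user", "content": USER_PROMPT_PLAN.format(
        task=task,
        tool_descriptions="- web_search: ...\n- final_answer: ...",
        managed_agents_descriptions="",
        answer_facts=answer_facts,
    )},
])
# Doubled braces survive str.format as literal braces:
print('{{"action": "final_answer"}}'.format())  # -> {"action": "final_answer"}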
PLAN_UPDATE_FINAL_PLAN_REDACTION = 'I still need to solve the task I was given:\n```\n{task}\n```\n\nHere is my new/updated plan of action to solve the task:\n```\n{plan_update}\n```' SUPPORTED_PLAN_TYPES = ['default', 'structured'] PROMPTS_FOR_INITIAL_PLAN = {'default': {'system': SYSTEM_PROMPT_PLAN, 'user': USER_PROMPT_PLAN}, 'structured': {'system': SYSTEM_PROMPT_PLAN_STRUCTURED, 'user': USER_PROMPT_PLAN_STRUCTURED}} PROMPTS_FOR_PLAN_UPDATE = {'default': {'system': SYSTEM_PROMPT_PLAN_UPDATE, 'user': USER_PROMPT_PLAN_UPDATE}, 'structured': {'system': SYSTEM_PROMPT_PLAN_UPDATE_STRUCTURED, 'user': USER_PROMPT_PLAN_UPDATE_STRUCTURED}} # File: transformers-main/src/transformers/agents/python_interpreter.py import ast import builtins import difflib from collections.abc import Mapping from importlib import import_module from typing import Any, Callable, Dict, List, Optional import numpy as np from ..utils import is_pandas_available if is_pandas_available(): import pandas as pd class InterpreterError(ValueError): pass ERRORS = {name: getattr(builtins, name) for name in dir(builtins) if isinstance(getattr(builtins, name), type) and issubclass(getattr(builtins, name), BaseException)} LIST_SAFE_MODULES = ['random', 'collections', 'math', 'time', 'queue', 'itertools', 're', 'stat', 'statistics', 'unicodedata'] (PRINT_OUTPUTS, MAX_LEN_OUTPUT) = ('', 50000) (OPERATIONS_COUNT, MAX_OPERATIONS) = (0, 10000000) class BreakException(Exception): pass class ContinueException(Exception): pass class ReturnException(Exception): def __init__(self, value): self.value = value def get_iterable(obj): if isinstance(obj, list): return obj elif hasattr(obj, '__iter__'): return list(obj) else: raise InterpreterError('Object is not iterable') def evaluate_unaryop(expression, state, static_tools, custom_tools): operand = evaluate_ast(expression.operand, state, static_tools, custom_tools) if isinstance(expression.op, ast.USub): return -operand elif isinstance(expression.op, ast.UAdd): return operand elif isinstance(expression.op, ast.Not): return not operand elif isinstance(expression.op, ast.Invert): return ~operand else: raise InterpreterError(f'Unary operation {expression.op.__class__.__name__} is not supported.') def evaluate_lambda(lambda_expression, state, static_tools, custom_tools): args = [arg.arg for arg in lambda_expression.args.args] def lambda_func(*values): new_state = state.copy() for (arg, value) in zip(args, values): new_state[arg] = value return evaluate_ast(lambda_expression.body, new_state, static_tools, custom_tools) return lambda_func def evaluate_while(while_loop, state, static_tools, custom_tools): max_iterations = 1000 iterations = 0 while evaluate_ast(while_loop.test, state, static_tools, custom_tools): for node in while_loop.body: try: evaluate_ast(node, state, static_tools, custom_tools) except BreakException: return None except ContinueException: break iterations += 1 if iterations > max_iterations: raise InterpreterError(f'Maximum number of {max_iterations} iterations in While loop exceeded') return None def create_function(func_def, state, static_tools, custom_tools): def new_func(*args, **kwargs): func_state = state.copy() arg_names = [arg.arg for arg in func_def.args.args] default_values = [evaluate_ast(d, state, static_tools, custom_tools) for d in func_def.args.defaults] defaults = dict(zip(arg_names[-len(default_values):], default_values)) for (name, value) in zip(arg_names, args): func_state[name] = value for (name, value) in kwargs.items(): func_state[name] = value if 
func_def.args.vararg: vararg_name = func_def.args.vararg.arg func_state[vararg_name] = args if func_def.args.kwarg: kwarg_name = func_def.args.kwarg.arg func_state[kwarg_name] = kwargs for (name, value) in defaults.items(): if name not in func_state: func_state[name] = value if func_def.args.args and func_def.args.args[0].arg == 'self': if args: func_state['self'] = args[0] func_state['__class__'] = args[0].__class__ result = None try: for stmt in func_def.body: result = evaluate_ast(stmt, func_state, static_tools, custom_tools) except ReturnException as e: result = e.value return result return new_func def create_class(class_name, class_bases, class_body): class_dict = {} for (key, value) in class_body.items(): class_dict[key] = value return type(class_name, tuple(class_bases), class_dict) def evaluate_function_def(func_def, state, static_tools, custom_tools): custom_tools[func_def.name] = create_function(func_def, state, static_tools, custom_tools) return custom_tools[func_def.name] def evaluate_class_def(class_def, state, static_tools, custom_tools): class_name = class_def.name bases = [evaluate_ast(base, state, static_tools, custom_tools) for base in class_def.bases] class_dict = {} for stmt in class_def.body: if isinstance(stmt, ast.FunctionDef): class_dict[stmt.name] = evaluate_function_def(stmt, state, static_tools, custom_tools) elif isinstance(stmt, ast.Assign): for target in stmt.targets: if isinstance(target, ast.Name): class_dict[target.id] = evaluate_ast(stmt.value, state, static_tools, custom_tools) elif isinstance(target, ast.Attribute): class_dict[target.attr] = evaluate_ast(stmt.value, state, static_tools, custom_tools) else: raise InterpreterError(f'Unsupported statement in class body: {stmt.__class__.__name__}') new_class = type(class_name, tuple(bases), class_dict) state[class_name] = new_class return new_class def evaluate_augassign(expression, state, static_tools, custom_tools): def get_current_value(target): if isinstance(target, ast.Name): return state.get(target.id, 0) elif isinstance(target, ast.Subscript): obj = evaluate_ast(target.value, state, static_tools, custom_tools) key = evaluate_ast(target.slice, state, static_tools, custom_tools) return obj[key] elif isinstance(target, ast.Attribute): obj = evaluate_ast(target.value, state, static_tools, custom_tools) return getattr(obj, target.attr) elif isinstance(target, ast.Tuple): return tuple((get_current_value(elt) for elt in target.elts)) elif isinstance(target, ast.List): return [get_current_value(elt) for elt in target.elts] else: raise InterpreterError('AugAssign not supported for {type(target)} targets.') current_value = get_current_value(expression.target) value_to_add = evaluate_ast(expression.value, state, static_tools, custom_tools) if isinstance(expression.op, ast.Add): if isinstance(current_value, list): if not isinstance(value_to_add, list): raise InterpreterError(f'Cannot add non-list value {value_to_add} to a list.') updated_value = current_value + value_to_add else: updated_value = current_value + value_to_add elif isinstance(expression.op, ast.Sub): updated_value = current_value - value_to_add elif isinstance(expression.op, ast.Mult): updated_value = current_value * value_to_add elif isinstance(expression.op, ast.Div): updated_value = current_value / value_to_add elif isinstance(expression.op, ast.Mod): updated_value = current_value % value_to_add elif isinstance(expression.op, ast.Pow): updated_value = current_value ** value_to_add elif isinstance(expression.op, ast.FloorDiv): updated_value = 
current_value // value_to_add elif isinstance(expression.op, ast.BitAnd): updated_value = current_value & value_to_add elif isinstance(expression.op, ast.BitOr): updated_value = current_value | value_to_add elif isinstance(expression.op, ast.BitXor): updated_value = current_value ^ value_to_add elif isinstance(expression.op, ast.LShift): updated_value = current_value << value_to_add elif isinstance(expression.op, ast.RShift): updated_value = current_value >> value_to_add else: raise InterpreterError(f'Operation {type(expression.op).__name__} is not supported.') set_value(expression.target, updated_value, state, static_tools, custom_tools) return updated_value def evaluate_boolop(node, state, static_tools, custom_tools): if isinstance(node.op, ast.And): for value in node.values: if not evaluate_ast(value, state, static_tools, custom_tools): return False return True elif isinstance(node.op, ast.Or): for value in node.values: if evaluate_ast(value, state, static_tools, custom_tools): return True return False def evaluate_binop(binop, state, static_tools, custom_tools): left_val = evaluate_ast(binop.left, state, static_tools, custom_tools) right_val = evaluate_ast(binop.right, state, static_tools, custom_tools) if isinstance(binop.op, ast.Add): return left_val + right_val elif isinstance(binop.op, ast.Sub): return left_val - right_val elif isinstance(binop.op, ast.Mult): return left_val * right_val elif isinstance(binop.op, ast.Div): return left_val / right_val elif isinstance(binop.op, ast.Mod): return left_val % right_val elif isinstance(binop.op, ast.Pow): return left_val ** right_val elif isinstance(binop.op, ast.FloorDiv): return left_val // right_val elif isinstance(binop.op, ast.BitAnd): return left_val & right_val elif isinstance(binop.op, ast.BitOr): return left_val | right_val elif isinstance(binop.op, ast.BitXor): return left_val ^ right_val elif isinstance(binop.op, ast.LShift): return left_val << right_val elif isinstance(binop.op, ast.RShift): return left_val >> right_val else: raise NotImplementedError(f'Binary operation {type(binop.op).__name__} is not implemented.') def evaluate_assign(assign, state, static_tools, custom_tools): result = evaluate_ast(assign.value, state, static_tools, custom_tools) if len(assign.targets) == 1: target = assign.targets[0] set_value(target, result, state, static_tools, custom_tools) else: if len(assign.targets) != len(result): raise InterpreterError(f'Assign failed: expected {len(result)} values but got {len(assign.targets)}.') expanded_values = [] for tgt in assign.targets: if isinstance(tgt, ast.Starred): expanded_values.extend(result) else: expanded_values.append(result) for (tgt, val) in zip(assign.targets, expanded_values): set_value(tgt, val, state, static_tools, custom_tools) return result def set_value(target, value, state, static_tools, custom_tools): if isinstance(target, ast.Name): if target.id in static_tools: raise InterpreterError(f"Cannot assign to name '{target.id}': doing this would erase the existing tool!") state[target.id] = value elif isinstance(target, ast.Tuple): if not isinstance(value, tuple): if hasattr(value, '__iter__') and (not isinstance(value, (str, bytes))): value = tuple(value) else: raise InterpreterError('Cannot unpack non-tuple value') if len(target.elts) != len(value): raise InterpreterError('Cannot unpack tuple of wrong size') for (i, elem) in enumerate(target.elts): set_value(elem, value[i], state, static_tools, custom_tools) elif isinstance(target, ast.Subscript): obj = evaluate_ast(target.value, state, 
static_tools, custom_tools) key = evaluate_ast(target.slice, state, static_tools, custom_tools) obj[key] = value elif isinstance(target, ast.Attribute): obj = evaluate_ast(target.value, state, static_tools, custom_tools) setattr(obj, target.attr, value) def evaluate_call(call, state, static_tools, custom_tools): if not (isinstance(call.func, ast.Attribute) or isinstance(call.func, ast.Name)): raise InterpreterError(f'This is not a correct function: {call.func}.') if isinstance(call.func, ast.Attribute): obj = evaluate_ast(call.func.value, state, static_tools, custom_tools) func_name = call.func.attr if not hasattr(obj, func_name): raise InterpreterError(f'Object {obj} has no attribute {func_name}') func = getattr(obj, func_name) elif isinstance(call.func, ast.Name): func_name = call.func.id if func_name in state: func = state[func_name] elif func_name in static_tools: func = static_tools[func_name] elif func_name in custom_tools: func = custom_tools[func_name] elif func_name in ERRORS: func = ERRORS[func_name] else: raise InterpreterError(f'It is not permitted to evaluate other functions than the provided tools or functions defined in previous code (tried to execute {call.func.id}).') args = [] for arg in call.args: if isinstance(arg, ast.Starred): unpacked = evaluate_ast(arg.value, state, static_tools, custom_tools) if not hasattr(unpacked, '__iter__') or isinstance(unpacked, (str, bytes)): raise InterpreterError(f'Cannot unpack non-iterable value {unpacked}') args.extend(unpacked) else: args.append(evaluate_ast(arg, state, static_tools, custom_tools)) kwargs = {keyword.arg: evaluate_ast(keyword.value, state, static_tools, custom_tools) for keyword in call.keywords} if isinstance(func, type) and len(func.__module__.split('.')) > 1: obj = func.__new__(func) if hasattr(obj, '__init__'): obj.__init__(*args, **kwargs) return obj elif func_name == 'super': if not args: if '__class__' in state and 'self' in state: return super(state['__class__'], state['self']) else: raise InterpreterError('super() needs at least one argument') cls = args[0] if not isinstance(cls, type): raise InterpreterError('super() argument 1 must be type') if len(args) == 1: return super(cls) elif len(args) == 2: instance = args[1] return super(cls, instance) else: raise InterpreterError('super() takes at most 2 arguments') elif func_name == 'print': output = ' '.join(map(str, args)) global PRINT_OUTPUTS PRINT_OUTPUTS += output + '\n' return None else: output = func(*args, **kwargs) return output def evaluate_subscript(subscript, state, static_tools, custom_tools): index = evaluate_ast(subscript.slice, state, static_tools, custom_tools) value = evaluate_ast(subscript.value, state, static_tools, custom_tools) if isinstance(value, str) and isinstance(index, str): raise InterpreterError("You're trying to subscript a string with a string index, which is impossible") if isinstance(value, pd.core.indexing._LocIndexer): parent_object = value.obj return parent_object.loc[index] if isinstance(value, (pd.DataFrame, pd.Series, np.ndarray)): return value[index] elif isinstance(value, pd.core.groupby.generic.DataFrameGroupBy): return value[index] elif isinstance(index, slice): return value[index] elif isinstance(value, (list, tuple)): if not -len(value) <= index < len(value): raise InterpreterError(f'Index {index} out of bounds
for list of length {len(value)}') return value[int(index)] elif isinstance(value, str): if not -len(value) <= index < len(value): raise InterpreterError(f'Index {index} out of bounds for string of length {len(value)}') return value[index] elif index in value: return value[index] elif isinstance(index, str) and isinstance(value, Mapping): close_matches = difflib.get_close_matches(index, list(value.keys())) if len(close_matches) > 0: return value[close_matches[0]] raise InterpreterError(f"Could not index {value} with '{index}'.") def evaluate_name(name, state, static_tools, custom_tools): if name.id in state: return state[name.id] elif name.id in static_tools: return static_tools[name.id] elif name.id in ERRORS: return ERRORS[name.id] close_matches = difflib.get_close_matches(name.id, list(state.keys())) if len(close_matches) > 0: return state[close_matches[0]] raise InterpreterError(f'The variable `{name.id}` is not defined.') def evaluate_condition(condition, state, static_tools, custom_tools): left = evaluate_ast(condition.left, state, static_tools, custom_tools) comparators = [evaluate_ast(c, state, static_tools, custom_tools) for c in condition.comparators] ops = [type(op) for op in condition.ops] result = True current_left = left for (op, comparator) in zip(ops, comparators): if op == ast.Eq: current_result = current_left == comparator elif op == ast.NotEq: current_result = current_left != comparator elif op == ast.Lt: current_result = current_left < comparator elif op == ast.LtE: current_result = current_left <= comparator elif op == ast.Gt: current_result = current_left > comparator elif op == ast.GtE: current_result = current_left >= comparator elif op == ast.Is: current_result = current_left is comparator elif op == ast.IsNot: current_result = current_left is not comparator elif op == ast.In: current_result = current_left in comparator elif op == ast.NotIn: current_result = current_left not in comparator else: raise InterpreterError(f'Operator not supported: {op}') result = result & current_result current_left = comparator if isinstance(result, bool) and (not result): break return result if isinstance(result, (bool, pd.Series)) else result.all() def evaluate_if(if_statement, state, static_tools, custom_tools): result = None test_result = evaluate_ast(if_statement.test, state, static_tools, custom_tools) if test_result: for line in if_statement.body: line_result = evaluate_ast(line, state, static_tools, custom_tools) if line_result is not None: result = line_result else: for line in if_statement.orelse: line_result = evaluate_ast(line, state, static_tools, custom_tools) if line_result is not None: result = line_result return result def evaluate_for(for_loop, state, static_tools, custom_tools): result = None iterator = evaluate_ast(for_loop.iter, state, static_tools, custom_tools) for counter in iterator: set_value(for_loop.target, counter, state, static_tools, custom_tools) for node in for_loop.body: try: line_result = evaluate_ast(node, state, static_tools, custom_tools) if line_result is not None: result = line_result except BreakException: break except ContinueException: continue else: continue break return result def evaluate_listcomp(listcomp, state, static_tools, custom_tools): def inner_evaluate(generators, index, current_state): if index >= len(generators): return [evaluate_ast(listcomp.elt, current_state, static_tools, custom_tools)] generator = generators[index] iter_value = evaluate_ast(generator.iter, current_state, static_tools, custom_tools) result = [] for value in 
iter_value: new_state = current_state.copy() if isinstance(generator.target, ast.Tuple): for (idx, elem) in enumerate(generator.target.elts): new_state[elem.id] = value[idx] else: new_state[generator.target.id] = value if all((evaluate_ast(if_clause, new_state, static_tools, custom_tools) for if_clause in generator.ifs)): result.extend(inner_evaluate(generators, index + 1, new_state)) return result return inner_evaluate(listcomp.generators, 0, state) def evaluate_try(try_node, state, static_tools, custom_tools): try: for stmt in try_node.body: evaluate_ast(stmt, state, static_tools, custom_tools) except Exception as e: matched = False for handler in try_node.handlers: if handler.type is None or isinstance(e, evaluate_ast(handler.type, state, static_tools, custom_tools)): matched = True if handler.name: state[handler.name] = e for stmt in handler.body: evaluate_ast(stmt, state, static_tools, custom_tools) break if not matched: raise e else: if try_node.orelse: for stmt in try_node.orelse: evaluate_ast(stmt, state, static_tools, custom_tools) finally: if try_node.finalbody: for stmt in try_node.finalbody: evaluate_ast(stmt, state, static_tools, custom_tools) def evaluate_raise(raise_node, state, static_tools, custom_tools): if raise_node.exc is not None: exc = evaluate_ast(raise_node.exc, state, static_tools, custom_tools) else: exc = None if raise_node.cause is not None: cause = evaluate_ast(raise_node.cause, state, static_tools, custom_tools) else: cause = None if exc is not None: if cause is not None: raise exc from cause else: raise exc else: raise InterpreterError('Re-raise is not supported without an active exception') def evaluate_assert(assert_node, state, static_tools, custom_tools): test_result = evaluate_ast(assert_node.test, state, static_tools, custom_tools) if not test_result: if assert_node.msg: msg = evaluate_ast(assert_node.msg, state, static_tools, custom_tools) raise AssertionError(msg) else: test_code = ast.unparse(assert_node.test) raise AssertionError(f'Assertion failed: {test_code}') def evaluate_with(with_node, state, static_tools, custom_tools): contexts = [] for item in with_node.items: context_expr = evaluate_ast(item.context_expr, state, static_tools, custom_tools) if item.optional_vars: state[item.optional_vars.id] = context_expr.__enter__() contexts.append(state[item.optional_vars.id]) else: context_var = context_expr.__enter__() contexts.append(context_var) try: for stmt in with_node.body: evaluate_ast(stmt, state, static_tools, custom_tools) except Exception as e: for context in reversed(contexts): context.__exit__(type(e), e, e.__traceback__) raise else: for context in reversed(contexts): context.__exit__(None, None, None) def import_modules(expression, state, authorized_imports): def check_module_authorized(module_name): module_path = module_name.split('.') module_subpaths = ['.'.join(module_path[:i]) for i in range(1, len(module_path) + 1)] return any((subpath in authorized_imports for subpath in module_subpaths)) if isinstance(expression, ast.Import): for alias in expression.names: if check_module_authorized(alias.name): module = import_module(alias.name) state[alias.asname or alias.name] = module else: raise InterpreterError(f'Import of {alias.name} is not allowed. 
Authorized imports are: {str(authorized_imports)}') return None elif isinstance(expression, ast.ImportFrom): if check_module_authorized(expression.module): module = __import__(expression.module, fromlist=[alias.name for alias in expression.names]) for alias in expression.names: state[alias.asname or alias.name] = getattr(module, alias.name) else: raise InterpreterError(f'Import from {expression.module} is not allowed.') return None def evaluate_dictcomp(dictcomp, state, static_tools, custom_tools): result = {} for gen in dictcomp.generators: iter_value = evaluate_ast(gen.iter, state, static_tools, custom_tools) for value in iter_value: new_state = state.copy() set_value(gen.target, value, new_state, static_tools, custom_tools) if all((evaluate_ast(if_clause, new_state, static_tools, custom_tools) for if_clause in gen.ifs)): key = evaluate_ast(dictcomp.key, new_state, static_tools, custom_tools) val = evaluate_ast(dictcomp.value, new_state, static_tools, custom_tools) result[key] = val return result def evaluate_ast(expression: ast.AST, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str]=LIST_SAFE_MODULES): global OPERATIONS_COUNT if OPERATIONS_COUNT >= MAX_OPERATIONS: raise InterpreterError(f"Reached the max number of operations of {MAX_OPERATIONS}. Maybe there is an infinite loop somewhere in the code, or you're just asking too many calculations.") OPERATIONS_COUNT += 1 if isinstance(expression, ast.Assign): return evaluate_assign(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.AugAssign): return evaluate_augassign(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Call): return evaluate_call(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Constant): return expression.value elif isinstance(expression, ast.Tuple): return tuple((evaluate_ast(elt, state, static_tools, custom_tools) for elt in expression.elts)) elif isinstance(expression, (ast.ListComp, ast.GeneratorExp)): return evaluate_listcomp(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.UnaryOp): return evaluate_unaryop(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Starred): return evaluate_ast(expression.value, state, static_tools, custom_tools) elif isinstance(expression, ast.BoolOp): return evaluate_boolop(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Break): raise BreakException() elif isinstance(expression, ast.Continue): raise ContinueException() elif isinstance(expression, ast.BinOp): return evaluate_binop(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Compare): return evaluate_condition(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Lambda): return evaluate_lambda(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.FunctionDef): return evaluate_function_def(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Dict): keys = [evaluate_ast(k, state, static_tools, custom_tools) for k in expression.keys] values = [evaluate_ast(v, state, static_tools, custom_tools) for v in expression.values] return dict(zip(keys, values)) elif isinstance(expression, ast.Expr): return evaluate_ast(expression.value, state, static_tools, custom_tools) elif isinstance(expression, ast.For): return evaluate_for(expression, state, static_tools, custom_tools) elif isinstance(expression, 
ast.FormattedValue): return evaluate_ast(expression.value, state, static_tools, custom_tools) elif isinstance(expression, ast.If): return evaluate_if(expression, state, static_tools, custom_tools) elif hasattr(ast, 'Index') and isinstance(expression, ast.Index): return evaluate_ast(expression.value, state, static_tools, custom_tools) elif isinstance(expression, ast.JoinedStr): return ''.join([str(evaluate_ast(v, state, static_tools, custom_tools)) for v in expression.values]) elif isinstance(expression, ast.List): return [evaluate_ast(elt, state, static_tools, custom_tools) for elt in expression.elts] elif isinstance(expression, ast.Name): return evaluate_name(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Subscript): return evaluate_subscript(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.IfExp): test_val = evaluate_ast(expression.test, state, static_tools, custom_tools) if test_val: return evaluate_ast(expression.body, state, static_tools, custom_tools) else: return evaluate_ast(expression.orelse, state, static_tools, custom_tools) elif isinstance(expression, ast.Attribute): value = evaluate_ast(expression.value, state, static_tools, custom_tools) return getattr(value, expression.attr) elif isinstance(expression, ast.Slice): return slice(evaluate_ast(expression.lower, state, static_tools, custom_tools) if expression.lower is not None else None, evaluate_ast(expression.upper, state, static_tools, custom_tools) if expression.upper is not None else None, evaluate_ast(expression.step, state, static_tools, custom_tools) if expression.step is not None else None) elif isinstance(expression, ast.DictComp): return evaluate_dictcomp(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.While): return evaluate_while(expression, state, static_tools, custom_tools) elif isinstance(expression, (ast.Import, ast.ImportFrom)): return import_modules(expression, state, authorized_imports) elif isinstance(expression, ast.ClassDef): return evaluate_class_def(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Try): return evaluate_try(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Raise): return evaluate_raise(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Assert): return evaluate_assert(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.With): return evaluate_with(expression, state, static_tools, custom_tools) elif isinstance(expression, ast.Set): return {evaluate_ast(elt, state, static_tools, custom_tools) for elt in expression.elts} elif isinstance(expression, ast.Return): raise ReturnException(evaluate_ast(expression.value, state, static_tools, custom_tools) if expression.value else None) else: raise InterpreterError(f'{expression.__class__.__name__} is not supported.') def evaluate_python_code(code: str, static_tools: Optional[Dict[str, Callable]]=None, custom_tools: Optional[Dict[str, Callable]]=None, state: Optional[Dict[str, Any]]=None, authorized_imports: List[str]=LIST_SAFE_MODULES): try: expression = ast.parse(code) except SyntaxError as e: raise SyntaxError(f'The code generated by the agent is not valid.\n{e}') if state is None: state = {} if static_tools is None: static_tools = {} if custom_tools is None: custom_tools = {} result = None global PRINT_OUTPUTS PRINT_OUTPUTS = '' global OPERATIONS_COUNT OPERATIONS_COUNT = 0 for node in expression.body: try: result = evaluate_ast(node, state, 
static_tools, custom_tools, authorized_imports) except InterpreterError as e: msg = '' if len(PRINT_OUTPUTS) > 0: if len(PRINT_OUTPUTS) < MAX_LEN_OUTPUT: msg += f'Print outputs:\n{PRINT_OUTPUTS}\n====\n' else: msg += f'Print outputs:\n{PRINT_OUTPUTS[:MAX_LEN_OUTPUT]}\n_Print outputs were over {MAX_LEN_OUTPUT} characters, so they have been truncated._\n====\n' msg += f"EXECUTION FAILED:\nEvaluation stopped at line '{ast.get_source_segment(code, node)}' because of the following error:\n{e}" raise InterpreterError(msg) finally: if len(PRINT_OUTPUTS) < MAX_LEN_OUTPUT: state['print_outputs'] = PRINT_OUTPUTS else: state['print_outputs'] = PRINT_OUTPUTS[:MAX_LEN_OUTPUT] + f'\n_Print outputs were over {MAX_LEN_OUTPUT} characters, so they have been truncated._' return result # File: transformers-main/src/transformers/agents/search.py import re import requests from requests.exceptions import RequestException from .tools import Tool class DuckDuckGoSearchTool(Tool): name = 'web_search' description = "Perform a web search based on your query (think a Google search) then returns the top search results as a list of dict elements.\n Each result has keys 'title', 'href' and 'body'." inputs = {'query': {'type': 'text', 'description': 'The search query to perform.'}} output_type = 'any' def forward(self, query: str) -> str: try: from duckduckgo_search import DDGS except ImportError: raise ImportError('You must install package `duckduckgo_search` to run this tool: for instance run `pip install duckduckgo-search`.') results = DDGS().text(query, max_results=7) return results class VisitWebpageTool(Tool): name = 'visit_webpage' description = 'Visits a wbepage at the given url and returns its content as a markdown string.' inputs = {'url': {'type': 'text', 'description': 'The url of the webpage to visit.'}} output_type = 'text' def forward(self, url: str) -> str: try: from markdownify import markdownify except ImportError: raise ImportError('You must install package `markdownify` to run this tool: for instance run `pip install markdownify`.') try: response = requests.get(url) response.raise_for_status() markdown_content = markdownify(response.text).strip() markdown_content = re.sub('\\n{3,}', '\n\n', markdown_content) return markdown_content except RequestException as e: return f'Error fetching the webpage: {str(e)}' except Exception as e: return f'An unexpected error occurred: {str(e)}' # File: transformers-main/src/transformers/agents/speech_to_text.py from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .tools import PipelineTool class SpeechToTextTool(PipelineTool): default_checkpoint = 'distil-whisper/distil-large-v3' description = 'This is a tool that transcribes an audio into text. It returns the transcribed text.' 
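# Hedged usage sketch for evaluate_python_code from python_interpreter.py above (illustrative, not part of
# this file). It runs a small snippet with an allow-listed tool and a restricted import list; the snippet
# and the final_answer lambda are made up for this example.
state = {}
result = evaluate_python_code(
    "import math\n"
    "x = math.sqrt(16)\n"
    "print('intermediate:', x)\n"
    "x + 1",
    static_tools={"final_answer": lambda answer: answer},  # callables exposed to the snippet
    state=state,                                           # persists across successive calls
    authorized_imports=["math"],                           # any other import raises InterpreterError
)
print(result)                  # 5.0 -- value of the last evaluated expression
print(state["print_outputs"])  # "intermediate: 4.0" -- print() output captured by the interpreter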
name = 'transcriber' pre_processor_class = WhisperProcessor model_class = WhisperForConditionalGeneration inputs = {'audio': {'type': 'audio', 'description': 'The audio to transcribe'}} output_type = 'text' def encode(self, audio): return self.pre_processor(audio, return_tensors='pt') def forward(self, inputs): return self.model.generate(inputs['input_features']) def decode(self, outputs): return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0] # File: transformers-main/src/transformers/agents/text_to_speech.py import torch from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor from ..utils import is_datasets_available from .tools import PipelineTool if is_datasets_available(): from datasets import load_dataset class TextToSpeechTool(PipelineTool): default_checkpoint = 'microsoft/speecht5_tts' description = 'This is a tool that reads an English text out loud. It returns a waveform object containing the sound.' name = 'text_to_speech' pre_processor_class = SpeechT5Processor model_class = SpeechT5ForTextToSpeech post_processor_class = SpeechT5HifiGan inputs = {'text': {'type': 'text', 'description': 'The text to read out loud (in English)'}} output_type = 'audio' def setup(self): if self.post_processor is None: self.post_processor = 'microsoft/speecht5_hifigan' super().setup() def encode(self, text, speaker_embeddings=None): inputs = self.pre_processor(text=text, return_tensors='pt', truncation=True) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError('Datasets needs to be installed if not passing speaker embeddings.') embeddings_dataset = load_dataset('Matthijs/cmu-arctic-xvectors', split='validation', trust_remote_code=True) speaker_embeddings = torch.tensor(embeddings_dataset[7305]['xvector']).unsqueeze(0) return {'input_ids': inputs['input_ids'], 'speaker_embeddings': speaker_embeddings} def forward(self, inputs): with torch.no_grad(): return self.model.generate_speech(**inputs) def decode(self, outputs): with torch.no_grad(): return self.post_processor(outputs).cpu().detach() # File: transformers-main/src/transformers/agents/tools.py import base64 import importlib import io import json import os import tempfile from functools import lru_cache from typing import Any, Dict, List, Optional, Union from huggingface_hub import create_repo, get_collection, hf_hub_download, metadata_update, upload_folder from huggingface_hub.utils import RepositoryNotFoundError, build_hf_headers, get_session from packaging import version from ..dynamic_module_utils import custom_object_save, get_class_from_dynamic_module, get_imports from ..models.auto import AutoProcessor from ..utils import CONFIG_NAME, cached_file, is_accelerate_available, is_torch_available, is_vision_available, logging from .agent_types import handle_agent_inputs, handle_agent_outputs logger = logging.get_logger(__name__) if is_torch_available(): import torch if is_accelerate_available(): from accelerate import PartialState from accelerate.utils import send_to_device TOOL_CONFIG_FILE = 'tool_config.json' def get_repo_type(repo_id, repo_type=None, **hub_kwargs): if repo_type is not None: return repo_type try: hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type='space', **hub_kwargs) return 'space' except RepositoryNotFoundError: try: hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type='model', **hub_kwargs) return 'model' except RepositoryNotFoundError: raise EnvironmentError(f'`{repo_id}` does not seem to be a valid repo identifier on the Hub.') except 
Exception: return 'model' except Exception: return 'space' APP_FILE_TEMPLATE = 'from transformers import launch_gradio_demo\nfrom {module_name} import {class_name}\n\nlaunch_gradio_demo({class_name})\n' class Tool: name: str description: str inputs: Dict[str, Dict[str, Union[str, type]]] output_type: type def __init__(self, *args, **kwargs): self.is_initialized = False def validate_attributes(self): required_attributes = {'description': str, 'name': str, 'inputs': Dict, 'output_type': type} for (attr, expected_type) in required_attributes.items(): attr_value = getattr(self, attr, None) if not isinstance(attr_value, expected_type): raise TypeError(f'Instance attribute {attr} must exist and be of type {expected_type.__name__}') def forward(self, *args, **kwargs): return NotImplemented('Write this method in your subclass of `Tool`.') def __call__(self, *args, **kwargs): (args, kwargs) = handle_agent_inputs(*args, **kwargs) outputs = self.forward(*args, **kwargs) return handle_agent_outputs(outputs, self.output_type) def setup(self): self.is_initialized = True def save(self, output_dir): os.makedirs(output_dir, exist_ok=True) if self.__module__ == '__main__': raise ValueError(f"We can't save the code defining {self} in {output_dir} as it's been defined in __main__. You have to put this code in a separate module so we can include it in the saved folder.") module_files = custom_object_save(self, output_dir) module_name = self.__class__.__module__ last_module = module_name.split('.')[-1] full_name = f'{last_module}.{self.__class__.__name__}' config_file = os.path.join(output_dir, 'tool_config.json') if os.path.isfile(config_file): with open(config_file, 'r', encoding='utf-8') as f: tool_config = json.load(f) else: tool_config = {} tool_config = {'tool_class': full_name, 'description': self.description, 'name': self.name, 'inputs': self.inputs, 'output_type': str(self.output_type)} with open(config_file, 'w', encoding='utf-8') as f: f.write(json.dumps(tool_config, indent=2, sort_keys=True) + '\n') app_file = os.path.join(output_dir, 'app.py') with open(app_file, 'w', encoding='utf-8') as f: f.write(APP_FILE_TEMPLATE.format(module_name=last_module, class_name=self.__class__.__name__)) requirements_file = os.path.join(output_dir, 'requirements.txt') imports = [] for module in module_files: imports.extend(get_imports(module)) imports = list(set(imports)) with open(requirements_file, 'w', encoding='utf-8') as f: f.write('\n'.join(imports) + '\n') @classmethod def from_hub(cls, repo_id: str, model_repo_id: Optional[str]=None, token: Optional[str]=None, **kwargs): hub_kwargs_names = ['cache_dir', 'force_download', 'resume_download', 'proxies', 'revision', 'repo_type', 'subfolder', 'local_files_only'] hub_kwargs = {k: v for (k, v) in kwargs.items() if k in hub_kwargs_names} hub_kwargs['repo_type'] = get_repo_type(repo_id, **hub_kwargs) resolved_config_file = cached_file(repo_id, TOOL_CONFIG_FILE, token=token, **hub_kwargs, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False) is_tool_config = resolved_config_file is not None if resolved_config_file is None: resolved_config_file = cached_file(repo_id, CONFIG_NAME, token=token, **hub_kwargs, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False) if resolved_config_file is None: raise EnvironmentError(f'{repo_id} does not appear to provide a valid configuration in `tool_config.json` or `config.json`.') 
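# Hedged illustration (not from the original tools.py): a minimal custom subclass of the `Tool`
# base class defined above. The class name, description and inputs are made up for this example;
# only the interface it follows (class-level `name`, `description`, `inputs`, `output_type` and a
# `forward` implementation) comes from the code above.
class UpperCaseTool(Tool):
    name = 'upper_case'
    description = 'Converts the input text to upper case and returns it.'
    inputs = {'text': {'type': 'text', 'description': 'The text to convert.'}}
    output_type = 'text'

    def forward(self, text: str) -> str:
        return text.upper()

# upper_case = UpperCaseTool()
# upper_case(text='hello')        # dispatched through Tool.__call__, returns the upper-cased text
# upper_case.save('./upper_case') # writes tool_config.json, app.py and requirements.txt as above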
with open(resolved_config_file, encoding='utf-8') as reader: config = json.load(reader) if not is_tool_config: if 'custom_tool' not in config: raise EnvironmentError(f'{repo_id} does not provide a mapping to custom tools in its configuration `config.json`.') custom_tool = config['custom_tool'] else: custom_tool = config tool_class = custom_tool['tool_class'] tool_class = get_class_from_dynamic_module(tool_class, repo_id, token=token, **hub_kwargs) if len(tool_class.name) == 0: tool_class.name = custom_tool['name'] if tool_class.name != custom_tool['name']: logger.warning(f'{tool_class.__name__} implements a different name in its configuration and class. Using the tool configuration name.') tool_class.name = custom_tool['name'] if len(tool_class.description) == 0: tool_class.description = custom_tool['description'] if tool_class.description != custom_tool['description']: logger.warning(f'{tool_class.__name__} implements a different description in its configuration and class. Using the tool configuration description.') tool_class.description = custom_tool['description'] if tool_class.inputs != custom_tool['inputs']: tool_class.inputs = custom_tool['inputs'] if tool_class.output_type != custom_tool['output_type']: tool_class.output_type = custom_tool['output_type'] return tool_class(**kwargs) def push_to_hub(self, repo_id: str, commit_message: str='Upload tool', private: Optional[bool]=None, token: Optional[Union[bool, str]]=None, create_pr: bool=False) -> str: repo_url = create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True, repo_type='space', space_sdk='gradio') repo_id = repo_url.repo_id metadata_update(repo_id, {'tags': ['tool']}, repo_type='space') with tempfile.TemporaryDirectory() as work_dir: self.save(work_dir) logger.info(f"Uploading the following files to {repo_id}: {','.join(os.listdir(work_dir))}") return upload_folder(repo_id=repo_id, commit_message=commit_message, folder_path=work_dir, token=token, create_pr=create_pr, repo_type='space') @staticmethod def from_gradio(gradio_tool): import inspect class GradioToolWrapper(Tool): def __init__(self, _gradio_tool): super().__init__() self.name = _gradio_tool.name self.description = _gradio_tool.description self.output_type = 'text' self._gradio_tool = _gradio_tool func_args = list(inspect.signature(_gradio_tool.run).parameters.keys()) self.inputs = {key: '' for key in func_args} def forward(self, *args, **kwargs): return self._gradio_tool.run(*args, **kwargs) return GradioToolWrapper(gradio_tool) @staticmethod def from_langchain(langchain_tool): class LangChainToolWrapper(Tool): def __init__(self, _langchain_tool): super().__init__() self.name = _langchain_tool.name.lower() self.description = _langchain_tool.description self.inputs = parse_langchain_args(_langchain_tool.args) self.output_type = 'text' self.langchain_tool = _langchain_tool def forward(self, *args, **kwargs): tool_input = kwargs.copy() for (index, argument) in enumerate(args): if index < len(self.inputs): input_key = next(iter(self.inputs)) tool_input[input_key] = argument return self.langchain_tool.run(tool_input) return LangChainToolWrapper(langchain_tool) DEFAULT_TOOL_DESCRIPTION_TEMPLATE = '\n- {{ tool.name }}: {{ tool.description }}\n Takes inputs: {{tool.inputs}}\n' def get_tool_description_with_args(tool: Tool, description_template: str=DEFAULT_TOOL_DESCRIPTION_TEMPLATE) -> str: compiled_template = compile_jinja_template(description_template) rendered = compiled_template.render(tool=tool) return rendered @lru_cache def 
compile_jinja_template(template): try: import jinja2 from jinja2.exceptions import TemplateError from jinja2.sandbox import ImmutableSandboxedEnvironment except ImportError: raise ImportError('template requires jinja2 to be installed.') if version.parse(jinja2.__version__) < version.parse('3.1.0'): raise ImportError(f'template requires jinja2>=3.1.0 to be installed. Your version is {jinja2.__version__}.') def raise_exception(message): raise TemplateError(message) jinja_env = ImmutableSandboxedEnvironment(trim_blocks=True, lstrip_blocks=True) jinja_env.globals['raise_exception'] = raise_exception return jinja_env.from_string(template) class PipelineTool(Tool): pre_processor_class = AutoProcessor model_class = None post_processor_class = AutoProcessor default_checkpoint = None description = 'This is a pipeline tool' name = 'pipeline' inputs = {'prompt': str} output_type = str def __init__(self, model=None, pre_processor=None, post_processor=None, device=None, device_map=None, model_kwargs=None, token=None, **hub_kwargs): if not is_torch_available(): raise ImportError('Please install torch in order to use this tool.') if not is_accelerate_available(): raise ImportError('Please install accelerate in order to use this tool.') if model is None: if self.default_checkpoint is None: raise ValueError('This tool does not implement a default checkpoint, you need to pass one.') model = self.default_checkpoint if pre_processor is None: pre_processor = model self.model = model self.pre_processor = pre_processor self.post_processor = post_processor self.device = device self.device_map = device_map self.model_kwargs = {} if model_kwargs is None else model_kwargs if device_map is not None: self.model_kwargs['device_map'] = device_map self.hub_kwargs = hub_kwargs self.hub_kwargs['token'] = token super().__init__() def setup(self): if isinstance(self.pre_processor, str): self.pre_processor = self.pre_processor_class.from_pretrained(self.pre_processor, **self.hub_kwargs) if isinstance(self.model, str): self.model = self.model_class.from_pretrained(self.model, **self.model_kwargs, **self.hub_kwargs) if self.post_processor is None: self.post_processor = self.pre_processor elif isinstance(self.post_processor, str): self.post_processor = self.post_processor_class.from_pretrained(self.post_processor, **self.hub_kwargs) if self.device is None: if self.device_map is not None: self.device = list(self.model.hf_device_map.values())[0] else: self.device = PartialState().default_device if self.device_map is None: self.model.to(self.device) super().setup() def encode(self, raw_inputs): return self.pre_processor(raw_inputs) def forward(self, inputs): with torch.no_grad(): return self.model(**inputs) def decode(self, outputs): return self.post_processor(outputs) def __call__(self, *args, **kwargs): (args, kwargs) = handle_agent_inputs(*args, **kwargs) if not self.is_initialized: self.setup() encoded_inputs = self.encode(*args, **kwargs) tensor_inputs = {k: v for (k, v) in encoded_inputs.items() if isinstance(v, torch.Tensor)} non_tensor_inputs = {k: v for (k, v) in encoded_inputs.items() if not isinstance(v, torch.Tensor)} encoded_inputs = send_to_device(tensor_inputs, self.device) outputs = self.forward({**encoded_inputs, **non_tensor_inputs}) outputs = send_to_device(outputs, 'cpu') decoded_outputs = self.decode(outputs) return handle_agent_outputs(decoded_outputs, self.output_type) def launch_gradio_demo(tool_class: Tool): try: import gradio as gr except ImportError: raise ImportError('Gradio should be installed in order 
to launch a gradio demo.') tool = tool_class() def fn(*args, **kwargs): return tool(*args, **kwargs) gradio_inputs = [] for (input_name, input_details) in tool_class.inputs.items(): input_type = input_details['type'] if input_type == 'text': gradio_inputs.append(gr.Textbox(label=input_name)) elif input_type == 'image': gradio_inputs.append(gr.Image(label=input_name)) elif input_type == 'audio': gradio_inputs.append(gr.Audio(label=input_name)) else: error_message = f"Input type '{input_type}' not supported." raise ValueError(error_message) gradio_output = tool_class.output_type assert gradio_output in ['text', 'image', 'audio'], f"Output type '{gradio_output}' not supported." gr.Interface(fn=fn, inputs=gradio_inputs, outputs=gradio_output, title=tool_class.__name__, article=tool.description).launch() TOOL_MAPPING = {'document_question_answering': 'DocumentQuestionAnsweringTool', 'image_question_answering': 'ImageQuestionAnsweringTool', 'speech_to_text': 'SpeechToTextTool', 'text_to_speech': 'TextToSpeechTool', 'translation': 'TranslationTool', 'python_interpreter': 'PythonInterpreterTool', 'web_search': 'DuckDuckGoSearchTool'} def load_tool(task_or_repo_id, model_repo_id=None, token=None, **kwargs): if task_or_repo_id in TOOL_MAPPING: tool_class_name = TOOL_MAPPING[task_or_repo_id] main_module = importlib.import_module('transformers') tools_module = main_module.agents tool_class = getattr(tools_module, tool_class_name) return tool_class(model_repo_id, token=token, **kwargs) else: logger.warning_once(f"You're loading a tool from the Hub from {model_repo_id}. Please make sure this is a source that you trust as the code within that tool will be executed on your machine. Always verify the code of the tools that you load. We recommend specifying a `revision` to ensure you're loading the code that you have checked.") return Tool.from_hub(task_or_repo_id, model_repo_id=model_repo_id, token=token, **kwargs) def add_description(description): def inner(func): func.description = description func.name = func.__name__ return func return inner class EndpointClient: def __init__(self, endpoint_url: str, token: Optional[str]=None): self.headers = {**build_hf_headers(token=token), 'Content-Type': 'application/json'} self.endpoint_url = endpoint_url @staticmethod def encode_image(image): _bytes = io.BytesIO() image.save(_bytes, format='PNG') b64 = base64.b64encode(_bytes.getvalue()) return b64.decode('utf-8') @staticmethod def decode_image(raw_image): if not is_vision_available(): raise ImportError('This tool returned an image but Pillow is not installed. 
Please install it (`pip install Pillow`).') from PIL import Image b64 = base64.b64decode(raw_image) _bytes = io.BytesIO(b64) return Image.open(_bytes) def __call__(self, inputs: Optional[Union[str, Dict, List[str], List[List[str]]]]=None, params: Optional[Dict]=None, data: Optional[bytes]=None, output_image: bool=False) -> Any: payload = {} if inputs: payload['inputs'] = inputs if params: payload['parameters'] = params response = get_session().post(self.endpoint_url, headers=self.headers, json=payload, data=data) if output_image: return self.decode_image(response.content) else: return response.json() def parse_langchain_args(args: Dict[str, str]) -> Dict[str, str]: inputs = args.copy() for arg_details in inputs.values(): if 'title' in arg_details: arg_details.pop('title') return inputs class ToolCollection: def __init__(self, collection_slug: str, token: Optional[str]=None): self._collection = get_collection(collection_slug, token=token) self._hub_repo_ids = {item.item_id for item in self._collection.items if item.item_type == 'space'} self.tools = {Tool.from_hub(repo_id) for repo_id in self._hub_repo_ids} # File: transformers-main/src/transformers/agents/translation.py from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer from .tools import PipelineTool LANGUAGE_CODES = {'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', "Ta'izzi-Adeni Arabic": 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri 
Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn'} class TranslationTool(PipelineTool): lang_to_code = LANGUAGE_CODES default_checkpoint = 'facebook/nllb-200-distilled-600M' description = f'This is a tool that translates text from a language to another.Both `src_lang`and `tgt_lang` should belong to this list of languages: {list(lang_to_code.keys())}.' name = 'translator' pre_processor_class = AutoTokenizer model_class = AutoModelForSeq2SeqLM inputs = {'text': {'type': 'text', 'description': 'The text to translate'}, 'src_lang': {'type': 'text', 'description': "The language of the text to translate. 
Written in plain English, such as 'Romanian', or 'Albanian'"}, 'tgt_lang': {'type': 'text', 'description': "The desired output language. Written in plain English, such as 'Romanian', or 'Albanian'"}} output_type = 'text' def encode(self, text, src_lang, tgt_lang): if src_lang not in self.lang_to_code: raise ValueError(f'{src_lang} is not a supported language.') if tgt_lang not in self.lang_to_code: raise ValueError(f'{tgt_lang} is not a supported language.') src_lang = self.lang_to_code[src_lang] tgt_lang = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs(text, return_tensors='pt', src_lang=src_lang, tgt_lang=tgt_lang) def forward(self, inputs): return self.model.generate(**inputs) def decode(self, outputs): return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True) # File: transformers-main/src/transformers/audio_utils.py """""" import warnings from typing import List, Optional, Tuple, Union import numpy as np def hertz_to_mel(freq: Union[float, np.ndarray], mel_scale: str='htk') -> Union[float, np.ndarray]: if mel_scale not in ['slaney', 'htk', 'kaldi']: raise ValueError('mel_scale should be one of "htk", "slaney" or "kaldi".') if mel_scale == 'htk': return 2595.0 * np.log10(1.0 + freq / 700.0) elif mel_scale == 'kaldi': return 1127.0 * np.log(1.0 + freq / 700.0) min_log_hertz = 1000.0 min_log_mel = 15.0 logstep = 27.0 / np.log(6.4) mels = 3.0 * freq / 200.0 if isinstance(freq, np.ndarray): log_region = freq >= min_log_hertz mels[log_region] = min_log_mel + np.log(freq[log_region] / min_log_hertz) * logstep elif freq >= min_log_hertz: mels = min_log_mel + np.log(freq / min_log_hertz) * logstep return mels def mel_to_hertz(mels: Union[float, np.ndarray], mel_scale: str='htk') -> Union[float, np.ndarray]: if mel_scale not in ['slaney', 'htk', 'kaldi']: raise ValueError('mel_scale should be one of "htk", "slaney" or "kaldi".') if mel_scale == 'htk': return 700.0 * (np.power(10, mels / 2595.0) - 1.0) elif mel_scale == 'kaldi': return 700.0 * (np.exp(mels / 1127.0) - 1.0) min_log_hertz = 1000.0 min_log_mel = 15.0 logstep = np.log(6.4) / 27.0 freq = 200.0 * mels / 3.0 if isinstance(mels, np.ndarray): log_region = mels >= min_log_mel freq[log_region] = min_log_hertz * np.exp(logstep * (mels[log_region] - min_log_mel)) elif mels >= min_log_mel: freq = min_log_hertz * np.exp(logstep * (mels - min_log_mel)) return freq def hertz_to_octave(freq: Union[float, np.ndarray], tuning: Optional[float]=0.0, bins_per_octave: Optional[int]=12): stuttgart_pitch = 440.0 * 2.0 ** (tuning / bins_per_octave) octave = np.log2(freq / (float(stuttgart_pitch) / 16)) return octave def _create_triangular_filter_bank(fft_freqs: np.ndarray, filter_freqs: np.ndarray) -> np.ndarray: filter_diff = np.diff(filter_freqs) slopes = np.expand_dims(filter_freqs, 0) - np.expand_dims(fft_freqs, 1) down_slopes = -slopes[:, :-2] / filter_diff[:-1] up_slopes = slopes[:, 2:] / filter_diff[1:] return np.maximum(np.zeros(1), np.minimum(down_slopes, up_slopes)) def chroma_filter_bank(num_frequency_bins: int, num_chroma: int, sampling_rate: int, tuning: float=0.0, power: Optional[float]=2.0, weighting_parameters: Optional[Tuple[float]]=(5.0, 2), start_at_c_chroma: Optional[bool]=True): frequencies = np.linspace(0, sampling_rate, num_frequency_bins, endpoint=False)[1:] freq_bins = num_chroma * hertz_to_octave(frequencies, tuning=tuning, bins_per_octave=num_chroma) freq_bins = np.concatenate(([freq_bins[0] - 1.5 * num_chroma], freq_bins)) bins_width = 
np.concatenate((np.maximum(freq_bins[1:] - freq_bins[:-1], 1.0), [1])) chroma_filters = np.subtract.outer(freq_bins, np.arange(0, num_chroma, dtype='d')).T num_chroma2 = np.round(float(num_chroma) / 2) chroma_filters = np.remainder(chroma_filters + num_chroma2 + 10 * num_chroma, num_chroma) - num_chroma2 chroma_filters = np.exp(-0.5 * (2 * chroma_filters / np.tile(bins_width, (num_chroma, 1))) ** 2) if power is not None: chroma_filters = chroma_filters / np.sum(chroma_filters ** power, axis=0, keepdims=True) ** (1.0 / power) if weighting_parameters is not None: (center, half_width) = weighting_parameters chroma_filters *= np.tile(np.exp(-0.5 * ((freq_bins / num_chroma - center) / half_width) ** 2), (num_chroma, 1)) if start_at_c_chroma: chroma_filters = np.roll(chroma_filters, -3 * (num_chroma // 12), axis=0) return np.ascontiguousarray(chroma_filters[:, :int(1 + num_frequency_bins / 2)]) def mel_filter_bank(num_frequency_bins: int, num_mel_filters: int, min_frequency: float, max_frequency: float, sampling_rate: int, norm: Optional[str]=None, mel_scale: str='htk', triangularize_in_mel_space: bool=False) -> np.ndarray: if norm is not None and norm != 'slaney': raise ValueError('norm must be one of None or "slaney"') mel_min = hertz_to_mel(min_frequency, mel_scale=mel_scale) mel_max = hertz_to_mel(max_frequency, mel_scale=mel_scale) mel_freqs = np.linspace(mel_min, mel_max, num_mel_filters + 2) filter_freqs = mel_to_hertz(mel_freqs, mel_scale=mel_scale) if triangularize_in_mel_space: fft_bin_width = sampling_rate / (num_frequency_bins * 2) fft_freqs = hertz_to_mel(fft_bin_width * np.arange(num_frequency_bins), mel_scale=mel_scale) filter_freqs = mel_freqs else: fft_freqs = np.linspace(0, sampling_rate // 2, num_frequency_bins) mel_filters = _create_triangular_filter_bank(fft_freqs, filter_freqs) if norm is not None and norm == 'slaney': enorm = 2.0 / (filter_freqs[2:num_mel_filters + 2] - filter_freqs[:num_mel_filters]) mel_filters *= np.expand_dims(enorm, 0) if (mel_filters.max(axis=0) == 0.0).any(): warnings.warn(f'At least one mel filter has all zero values. The value for `num_mel_filters` ({num_mel_filters}) may be set too high. 
Or, the value for `num_frequency_bins` ({num_frequency_bins}) may be set too low.') return mel_filters def optimal_fft_length(window_length: int) -> int: return 2 ** int(np.ceil(np.log2(window_length))) def window_function(window_length: int, name: str='hann', periodic: bool=True, frame_length: Optional[int]=None, center: bool=True) -> np.ndarray: length = window_length + 1 if periodic else window_length if name == 'boxcar': window = np.ones(length) elif name in ['hamming', 'hamming_window']: window = np.hamming(length) elif name in ['hann', 'hann_window']: window = np.hanning(length) elif name in ['povey']: window = np.power(np.hanning(length), 0.85) else: raise ValueError(f"Unknown window function '{name}'") if periodic: window = window[:-1] if frame_length is None: return window if window_length > frame_length: raise ValueError(f'Length of the window ({window_length}) may not be larger than frame_length ({frame_length})') padded_window = np.zeros(frame_length) offset = (frame_length - window_length) // 2 if center else 0 padded_window[offset:offset + window_length] = window return padded_window def spectrogram(waveform: np.ndarray, window: np.ndarray, frame_length: int, hop_length: int, fft_length: Optional[int]=None, power: Optional[float]=1.0, center: bool=True, pad_mode: str='reflect', onesided: bool=True, preemphasis: Optional[float]=None, mel_filters: Optional[np.ndarray]=None, mel_floor: float=1e-10, log_mel: Optional[str]=None, reference: float=1.0, min_value: float=1e-10, db_range: Optional[float]=None, remove_dc_offset: Optional[bool]=None, dtype: np.dtype=np.float32) -> np.ndarray: window_length = len(window) if fft_length is None: fft_length = frame_length if frame_length > fft_length: raise ValueError(f'frame_length ({frame_length}) may not be larger than fft_length ({fft_length})') if window_length != frame_length: raise ValueError(f'Length of the window ({window_length}) must equal frame_length ({frame_length})') if hop_length <= 0: raise ValueError('hop_length must be greater than zero') if waveform.ndim != 1: raise ValueError(f'Input waveform must have only one dimension, shape is {waveform.shape}') if np.iscomplexobj(waveform): raise ValueError('Complex-valued input waveforms are not currently supported') if power is None and mel_filters is not None: raise ValueError('You have provided `mel_filters` but `power` is `None`. 
Mel spectrogram computation is not yet supported for complex-valued spectrogram.Specify `power` to fix this issue.') if center: padding = [(int(frame_length // 2), int(frame_length // 2))] waveform = np.pad(waveform, padding, mode=pad_mode) waveform = waveform.astype(np.float64) window = window.astype(np.float64) num_frames = int(1 + np.floor((waveform.size - frame_length) / hop_length)) num_frequency_bins = fft_length // 2 + 1 if onesided else fft_length spectrogram = np.empty((num_frames, num_frequency_bins), dtype=np.complex64) fft_func = np.fft.rfft if onesided else np.fft.fft buffer = np.zeros(fft_length) timestep = 0 for frame_idx in range(num_frames): buffer[:frame_length] = waveform[timestep:timestep + frame_length] if remove_dc_offset: buffer[:frame_length] = buffer[:frame_length] - buffer[:frame_length].mean() if preemphasis is not None: buffer[1:frame_length] -= preemphasis * buffer[:frame_length - 1] buffer[0] *= 1 - preemphasis buffer[:frame_length] *= window spectrogram[frame_idx] = fft_func(buffer) timestep += hop_length if power is not None: spectrogram = np.abs(spectrogram, dtype=np.float64) ** power spectrogram = spectrogram.T if mel_filters is not None: spectrogram = np.maximum(mel_floor, np.dot(mel_filters.T, spectrogram)) if power is not None and log_mel is not None: if log_mel == 'log': spectrogram = np.log(spectrogram) elif log_mel == 'log10': spectrogram = np.log10(spectrogram) elif log_mel == 'dB': if power == 1.0: spectrogram = amplitude_to_db(spectrogram, reference, min_value, db_range) elif power == 2.0: spectrogram = power_to_db(spectrogram, reference, min_value, db_range) else: raise ValueError(f"Cannot use log_mel option '{log_mel}' with power {power}") else: raise ValueError(f'Unknown log_mel option: {log_mel}') spectrogram = np.asarray(spectrogram, dtype) return spectrogram def spectrogram_batch(waveform_list: List[np.ndarray], window: np.ndarray, frame_length: int, hop_length: int, fft_length: Optional[int]=None, power: Optional[float]=1.0, center: bool=True, pad_mode: str='reflect', onesided: bool=True, preemphasis: Optional[float]=None, mel_filters: Optional[np.ndarray]=None, mel_floor: float=1e-10, log_mel: Optional[str]=None, reference: float=1.0, min_value: float=1e-10, db_range: Optional[float]=None, remove_dc_offset: Optional[bool]=None, dtype: np.dtype=np.float32) -> List[np.ndarray]: window_length = len(window) if fft_length is None: fft_length = frame_length if frame_length > fft_length: raise ValueError(f'frame_length ({frame_length}) may not be larger than fft_length ({fft_length})') if window_length != frame_length: raise ValueError(f'Length of the window ({window_length}) must equal frame_length ({frame_length})') if hop_length <= 0: raise ValueError('hop_length must be greater than zero') for waveform in waveform_list: if waveform.ndim != 1: raise ValueError(f'Input waveform must have only one dimension, shape is {waveform.shape}') for waveform in waveform_list: if np.iscomplexobj(waveform): raise ValueError('Complex-valued input waveforms are not currently supported') if center: padding = [(int(frame_length // 2), int(frame_length // 2))] waveform_list = [np.pad(waveform, padding, mode=pad_mode) for waveform in waveform_list] original_waveform_lengths = [len(waveform) for waveform in waveform_list] max_length = max(original_waveform_lengths) padded_waveform_batch = np.array([np.pad(waveform, (0, max_length - len(waveform)), mode='constant', constant_values=0) for waveform in waveform_list], dtype=dtype) padded_waveform_batch = 
padded_waveform_batch.astype(np.float64) window = window.astype(np.float64) num_frames = int(1 + np.floor((padded_waveform_batch.shape[1] - frame_length) / hop_length)) true_num_frames = [int(1 + np.floor((length - frame_length) / hop_length)) for length in original_waveform_lengths] num_batches = padded_waveform_batch.shape[0] num_frequency_bins = fft_length // 2 + 1 if onesided else fft_length spectrogram = np.empty((num_batches, num_frames, num_frequency_bins), dtype=np.complex64) fft_func = np.fft.rfft if onesided else np.fft.fft buffer = np.zeros((num_batches, fft_length)) for frame_idx in range(num_frames): timestep = frame_idx * hop_length buffer[:, :frame_length] = padded_waveform_batch[:, timestep:timestep + frame_length] if remove_dc_offset: buffer[:, :frame_length] -= buffer[:, :frame_length].mean(axis=1, keepdims=True) if preemphasis is not None: buffer[:, 1:frame_length] -= preemphasis * buffer[:, :frame_length - 1] buffer[:, 0] *= 1 - preemphasis buffer[:, :frame_length] *= window spectrogram[:, frame_idx] = fft_func(buffer) if power is not None: spectrogram = np.abs(spectrogram, dtype=np.float64) ** power if mel_filters is not None: result = np.tensordot(spectrogram, mel_filters.T, axes=([2], [1])) spectrogram = np.maximum(mel_floor, result) if power is not None and log_mel is not None: if log_mel == 'log': spectrogram = np.log(spectrogram) elif log_mel == 'log10': spectrogram = np.log10(spectrogram) elif log_mel == 'dB': if power == 1.0: spectrogram = amplitude_to_db_batch(spectrogram, reference, min_value, db_range) elif power == 2.0: spectrogram = power_to_db_batch(spectrogram, reference, min_value, db_range) else: raise ValueError(f"Cannot use log_mel option '{log_mel}' with power {power}") else: raise ValueError(f'Unknown log_mel option: {log_mel}') spectrogram = np.asarray(spectrogram, dtype) spectrogram_list = [spectrogram[i, :true_num_frames[i], :].T for i in range(len(true_num_frames))] return spectrogram_list def power_to_db(spectrogram: np.ndarray, reference: float=1.0, min_value: float=1e-10, db_range: Optional[float]=None) -> np.ndarray: if reference <= 0.0: raise ValueError('reference must be greater than zero') if min_value <= 0.0: raise ValueError('min_value must be greater than zero') reference = max(min_value, reference) spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None) spectrogram = 10.0 * (np.log10(spectrogram) - np.log10(reference)) if db_range is not None: if db_range <= 0.0: raise ValueError('db_range must be greater than zero') spectrogram = np.clip(spectrogram, a_min=spectrogram.max() - db_range, a_max=None) return spectrogram def power_to_db_batch(spectrogram: np.ndarray, reference: float=1.0, min_value: float=1e-10, db_range: Optional[float]=None) -> np.ndarray: if reference <= 0.0: raise ValueError('reference must be greater than zero') if min_value <= 0.0: raise ValueError('min_value must be greater than zero') reference = max(min_value, reference) spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None) spectrogram = 10.0 * (np.log10(spectrogram) - np.log10(reference)) if db_range is not None: if db_range <= 0.0: raise ValueError('db_range must be greater than zero') max_values = spectrogram.max(axis=(1, 2), keepdims=True) spectrogram = np.clip(spectrogram, a_min=max_values - db_range, a_max=None) return spectrogram def amplitude_to_db(spectrogram: np.ndarray, reference: float=1.0, min_value: float=1e-05, db_range: Optional[float]=None) -> np.ndarray: if reference <= 0.0: raise ValueError('reference must be greater than 
zero') if min_value <= 0.0: raise ValueError('min_value must be greater than zero') reference = max(min_value, reference) spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None) spectrogram = 20.0 * (np.log10(spectrogram) - np.log10(reference)) if db_range is not None: if db_range <= 0.0: raise ValueError('db_range must be greater than zero') spectrogram = np.clip(spectrogram, a_min=spectrogram.max() - db_range, a_max=None) return spectrogram def amplitude_to_db_batch(spectrogram: np.ndarray, reference: float=1.0, min_value: float=1e-05, db_range: Optional[float]=None) -> np.ndarray: if reference <= 0.0: raise ValueError('reference must be greater than zero') if min_value <= 0.0: raise ValueError('min_value must be greater than zero') reference = max(min_value, reference) spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None) spectrogram = 20.0 * (np.log10(spectrogram) - np.log10(reference)) if db_range is not None: if db_range <= 0.0: raise ValueError('db_range must be greater than zero') max_values = spectrogram.max(axis=(1, 2), keepdims=True) spectrogram = np.clip(spectrogram, a_min=max_values - db_range, a_max=None) return spectrogram def get_mel_filter_banks(nb_frequency_bins: int, nb_mel_filters: int, frequency_min: float, frequency_max: float, sample_rate: int, norm: Optional[str]=None, mel_scale: str='htk') -> np.array: warnings.warn('The function `get_mel_filter_banks` is deprecated and will be removed in version 4.31.0 of Transformers', FutureWarning) return mel_filter_bank(num_frequency_bins=nb_frequency_bins, num_mel_filters=nb_mel_filters, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sample_rate, norm=norm, mel_scale=mel_scale) def fram_wave(waveform: np.array, hop_length: int=160, fft_window_size: int=400, center: bool=True): warnings.warn('The function `fram_wave` is deprecated and will be removed in version 4.31.0 of Transformers', FutureWarning) frames = [] for i in range(0, waveform.shape[0] + 1, hop_length): if center: half_window = (fft_window_size - 1) // 2 + 1 start = i - half_window if i > half_window else 0 end = i + half_window if i < waveform.shape[0] - half_window else waveform.shape[0] frame = waveform[start:end] if start == 0: padd_width = (-i + half_window, 0) frame = np.pad(frame, pad_width=padd_width, mode='reflect') elif end == waveform.shape[0]: padd_width = (0, i - waveform.shape[0] + half_window) frame = np.pad(frame, pad_width=padd_width, mode='reflect') else: frame = waveform[i:i + fft_window_size] frame_width = frame.shape[0] if frame_width < waveform.shape[0]: frame = np.lib.pad(frame, pad_width=(0, fft_window_size - frame_width), mode='constant', constant_values=0) frames.append(frame) frames = np.stack(frames, 0) return frames def stft(frames: np.array, windowing_function: np.array, fft_window_size: int=None): warnings.warn('The function `stft` is deprecated and will be removed in version 4.31.0 of Transformers', FutureWarning) frame_size = frames.shape[1] if fft_window_size is None: fft_window_size = frame_size if fft_window_size < frame_size: raise ValueError('FFT size must greater or equal the frame size') nb_frequency_bins = (fft_window_size >> 1) + 1 spectrogram = np.empty((len(frames), nb_frequency_bins), dtype=np.complex64) fft_signal = np.zeros(fft_window_size) for (f, frame) in enumerate(frames): if windowing_function is not None: np.multiply(frame, windowing_function, out=fft_signal[:frame_size]) else: fft_signal[:frame_size] = frame spectrogram[f] = np.fft.fft(fft_signal, 
axis=0)[:nb_frequency_bins] return spectrogram.T # File: transformers-main/src/transformers/benchmark/benchmark.py """""" import timeit from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_py3nvml_available, is_torch_available, logging from .benchmark_utils import Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing if is_torch_available(): import torch from .benchmark_args import PyTorchBenchmarkArguments if is_py3nvml_available(): import py3nvml.py3nvml as nvml logger = logging.get_logger(__name__) class PyTorchBenchmark(Benchmark): args: PyTorchBenchmarkArguments configs: PretrainedConfig framework: str = 'PyTorch' @property def framework_version(self): return torch.__version__ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_speed(_inference) def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]: _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_memory(_inference) def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: _train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_speed(_train) def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]: _train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_memory(_train) def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] if self.args.torchscript: config.torchscript = True has_model_class_in_config = hasattr(config, 'architectures') and isinstance(config.architectures, list) and (len(config.architectures) > 0) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = config.architectures[0] transformers_module = __import__('transformers', fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError(f'{model_class} does not exist. 
If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`.') else: model = MODEL_MAPPING[config.__class__](config) model.eval() model.to(self.args.device) vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device) if self.args.fp16: logger.info('Running training in Mixed Precision...') if not self.args.is_gpu: raise ValueError('Mixed precision is possible only for GPU.') model.half() if self.args.torchscript: with torch.no_grad(): inference_model = torch.jit.trace(model, input_ids) else: inference_model = model def encoder_decoder_forward(): with torch.no_grad(): outputs = inference_model(input_ids, decoder_input_ids=input_ids) return outputs def encoder_forward(): with torch.no_grad(): outputs = inference_model(input_ids) return outputs _forward = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _forward def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] has_model_class_in_config = hasattr(config, 'architectures') and isinstance(config.architectures, list) and (len(config.architectures) > 0) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = config.architectures[0] transformers_module = __import__('transformers', fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError(f'{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`.') else: model = MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config) if self.args.torchscript: raise NotImplementedError('Training for torchscript is currently not implemented') else: train_model = model model.train() model.to(self.args.device) vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device) if self.args.fp16: logger.info('Running training in Mixed Precision...') if not self.args.is_gpu: raise ValueError('Mixed precision is possible only for GPU.') model.half() def compute_loss_and_backprob_encoder(): loss = train_model(input_ids, labels=input_ids)[0] loss.backward() return loss def compute_loss_and_backprob_encoder_decoder(): loss = train_model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0] loss.backward() return loss _train = compute_loss_and_backprob_encoder_decoder if config.is_encoder_decoder else compute_loss_and_backprob_encoder return _train def _measure_speed(self, func) -> float: try: if self.args.is_tpu or self.args.torchscript: logger.info('Do inference on TPU or torchscript. Running model 5 times to stabilize compilation') timeit.repeat(func, repeat=1, number=5) runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10) if self.args.is_tpu and self.args.torch_xla_tpu_print_metrics: import torch_xla.debug.metrics as met self.print_fn(met.metrics_report()) return min(runtimes) / 10.0 except RuntimeError as e: self.print_fn(f"Doesn't fit on GPU. 
{e}") return 'N/A' def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]: try: if self.args.trace_memory_line_by_line: trace = start_memory_tracing('transformers') if self.args.is_tpu: raise NotImplementedError('Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with `--no-memory` or `args.memory=False`') elif self.args.is_gpu: if not is_py3nvml_available(): logger.warning("py3nvml not installed, we won't log GPU memory usage. Install py3nvml (pip install py3nvml) to log information about GPU.") memory = 'N/A' else: logger.info('Measuring total GPU usage on GPU device. Make sure to not have additional processes running on the same GPU.') nvml.nvmlInit() func() handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) meminfo = nvml.nvmlDeviceGetMemoryInfo(handle) max_bytes_in_use = meminfo.used memory = Memory(max_bytes_in_use) nvml.nvmlShutdown() else: memory_bytes = measure_peak_memory_cpu(func) memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes if self.args.trace_memory_line_by_line: summary = stop_memory_tracing(trace) else: summary = None return (memory, summary) except RuntimeError as e: self.print_fn(f"Doesn't fit on GPU. {e}") return ('N/A', None) # File: transformers-main/src/transformers/benchmark/benchmark_args.py from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_xla_available, is_torch_xpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_xla_available(): import torch_xla.core.xla_model as xm logger = logging.get_logger(__name__) @dataclass class PyTorchBenchmarkArguments(BenchmarkArguments): deprecated_args = ['no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process'] def __init__(self, **kwargs): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: positive_arg = deprecated_arg[3:] setattr(self, positive_arg, not kwargs.pop(deprecated_arg)) logger.warning(f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or {positive_arg}={kwargs[positive_arg]}') self.torchscript = kwargs.pop('torchscript', self.torchscript) self.torch_xla_tpu_print_metrics = kwargs.pop('torch_xla_tpu_print_metrics', self.torch_xla_tpu_print_metrics) self.fp16_opt_level = kwargs.pop('fp16_opt_level', self.fp16_opt_level) super().__init__(**kwargs) torchscript: bool = field(default=False, metadata={'help': 'Trace the models using torchscript'}) torch_xla_tpu_print_metrics: bool = field(default=False, metadata={'help': 'Print Xla/PyTorch tpu metrics'}) fp16_opt_level: str = field(default='O1', metadata={'help': "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. 
See details at https://nvidia.github.io/apex/amp.html"}) @cached_property def _setup_devices(self) -> Tuple['torch.device', int]: requires_backends(self, ['torch']) logger.info('PyTorch: setting up devices') if not self.cuda: device = torch.device('cpu') n_gpu = 0 elif is_torch_xla_available(): device = xm.xla_device() n_gpu = 0 elif is_torch_xpu_available(): device = torch.device('xpu') n_gpu = torch.xpu.device_count() else: device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') n_gpu = torch.cuda.device_count() return (device, n_gpu) @property def is_tpu(self): return is_torch_xla_available() and self.tpu @property def device_idx(self) -> int: requires_backends(self, ['torch']) return torch.cuda.current_device() @property def device(self) -> 'torch.device': requires_backends(self, ['torch']) return self._setup_devices[0] @property def n_gpu(self): requires_backends(self, ['torch']) return self._setup_devices[1] @property def is_gpu(self): return self.n_gpu > 0 # File: transformers-main/src/transformers/benchmark/benchmark_args_tf.py from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf logger = logging.get_logger(__name__) @dataclass class TensorFlowBenchmarkArguments(BenchmarkArguments): deprecated_args = ['no_inference', 'no_cuda', 'no_tpu', 'no_speed', 'no_memory', 'no_env_print', 'no_multi_process'] def __init__(self, **kwargs): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: positive_arg = deprecated_arg[3:] kwargs[positive_arg] = not kwargs.pop(deprecated_arg) logger.warning(f'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or {positive_arg}={kwargs[positive_arg]}') self.tpu_name = kwargs.pop('tpu_name', self.tpu_name) self.device_idx = kwargs.pop('device_idx', self.device_idx) self.eager_mode = kwargs.pop('eager_mode', self.eager_mode) self.use_xla = kwargs.pop('use_xla', self.use_xla) super().__init__(**kwargs) tpu_name: str = field(default=None, metadata={'help': 'Name of TPU'}) device_idx: int = field(default=0, metadata={'help': 'CPU / GPU device index. Defaults to 0.'}) eager_mode: bool = field(default=False, metadata={'help': 'Benchmark models in eager mode.'}) use_xla: bool = field(default=False, metadata={'help': 'Benchmark models using XLA JIT compilation. 
Note that `eager_model` has to be set to `False`.'}) @cached_property def _setup_tpu(self) -> Tuple['tf.distribute.cluster_resolver.TPUClusterResolver']: requires_backends(self, ['tf']) tpu = None if self.tpu: try: if self.tpu_name: tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name) else: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: tpu = None return tpu @cached_property def _setup_strategy(self) -> Tuple['tf.distribute.Strategy', 'tf.distribute.cluster_resolver.TPUClusterResolver']: requires_backends(self, ['tf']) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu) strategy = tf.distribute.TPUStrategy(self._setup_tpu) elif self.is_gpu: tf.config.set_visible_devices(self.gpu_list[self.device_idx], 'GPU') strategy = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}') else: tf.config.set_visible_devices([], 'GPU') strategy = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}') return strategy @property def is_tpu(self) -> bool: requires_backends(self, ['tf']) return self._setup_tpu is not None @property def strategy(self) -> 'tf.distribute.Strategy': requires_backends(self, ['tf']) return self._setup_strategy @property def gpu_list(self): requires_backends(self, ['tf']) return tf.config.list_physical_devices('GPU') @property def n_gpu(self) -> int: requires_backends(self, ['tf']) if self.cuda: return len(self.gpu_list) return 0 @property def is_gpu(self) -> bool: return self.n_gpu > 0 # File: transformers-main/src/transformers/benchmark/benchmark_args_utils.py import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging logger = logging.get_logger(__name__) def list_field(default=None, metadata=None): return field(default_factory=lambda : default, metadata=metadata) @dataclass class BenchmarkArguments: models: List[str] = list_field(default=[], metadata={'help': 'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version of all available models'}) batch_sizes: List[int] = list_field(default=[8], metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'}) sequence_lengths: List[int] = list_field(default=[8, 32, 128, 512], metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'}) inference: bool = field(default=True, metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'}) cuda: bool = field(default=True, metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'}) tpu: bool = field(default=True, metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'}) fp16: bool = field(default=False, metadata={'help': 'Use FP16 to accelerate inference.'}) training: bool = field(default=False, metadata={'help': 'Benchmark training of model'}) verbose: bool = field(default=False, metadata={'help': 'Verbose memory tracing'}) speed: bool = field(default=True, metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'}) memory: bool = field(default=True, metadata={'help': 'Whether to perform memory measurements. 
Memory measurements can be disabled via --no-memory'}) trace_memory_line_by_line: bool = field(default=False, metadata={'help': 'Trace memory line by line'}) save_to_csv: bool = field(default=False, metadata={'help': 'Save result to a CSV file'}) log_print: bool = field(default=False, metadata={'help': 'Save all print statements in a log file'}) env_print: bool = field(default=False, metadata={'help': 'Whether to print environment information'}) multi_process: bool = field(default=True, metadata={'help': 'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled for debugging / testing and on TPU.'}) inference_time_csv_file: str = field(default=f'inference_time_{round(time())}.csv', metadata={'help': 'CSV filename used if saving time results to csv.'}) inference_memory_csv_file: str = field(default=f'inference_memory_{round(time())}.csv', metadata={'help': 'CSV filename used if saving memory results to csv.'}) train_time_csv_file: str = field(default=f'train_time_{round(time())}.csv', metadata={'help': 'CSV filename used if saving time results to csv for training.'}) train_memory_csv_file: str = field(default=f'train_memory_{round(time())}.csv', metadata={'help': 'CSV filename used if saving memory results to csv for training.'}) env_info_csv_file: str = field(default=f'env_info_{round(time())}.csv', metadata={'help': 'CSV filename used if saving environment information.'}) log_filename: str = field(default=f'log_{round(time())}.csv', metadata={'help': 'Log filename used if print statements are saved in log.'}) repeat: int = field(default=3, metadata={'help': 'Times an experiment will be run.'}) only_pretrain_model: bool = field(default=False, metadata={'help': 'Instead of loading the model as defined in `config.architectures` if it exists, just load the pretrained model weights.'}) def __post_init__(self): warnings.warn(f'The class {self.__class__} is deprecated. 
Hugging Face Benchmarking utils are deprecated in general and it is advised to use external Benchmarking libraries to benchmark Transformer models.', FutureWarning) def to_json_string(self): return json.dumps(dataclasses.asdict(self), indent=2) @property def model_names(self) -> List[str]: if len(self.models) <= 0: raise ValueError("Please make sure you provide at least one model name / model identifier, *e.g.* `--models google-bert/bert-base-cased` or `args.models = ['google-bert/bert-base-cased'].") return self.models @property def do_multi_processing(self): if not self.multi_process: return False elif self.is_tpu: logger.info('Multiprocessing is currently not possible on TPU.') return False else: return True # File: transformers-main/src/transformers/benchmark/benchmark_tf.py """""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_py3nvml_available, is_tf_available, logging from .benchmark_utils import Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_py3nvml_available(): import py3nvml.py3nvml as nvml logger = logging.get_logger(__name__) def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool): def run_func(func): @wraps(func) def run_in_eager_mode(*args, **kwargs): return func(*args, **kwargs) @wraps(func) @tf.function(experimental_compile=use_xla) def run_in_graph_mode(*args, **kwargs): return func(*args, **kwargs) if do_eager_mode is True: if use_xla is not False: raise ValueError('Cannot run model in XLA, if `args.eager_mode` is set to `True`. 
Please set `args.eager_mode=False`.') return run_in_eager_mode else: return run_in_graph_mode return run_func def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ['tf.Tensor']: rng = random.Random() values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)] return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32) class TensorFlowBenchmark(Benchmark): args: TensorFlowBenchmarkArguments configs: PretrainedConfig framework: str = 'TensorFlow' @property def framework_version(self): return tf.__version__ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: strategy = self.args.strategy if strategy is None: raise ValueError('A device strategy has to be initialized before using TensorFlow.') _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_speed(_inference) def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: strategy = self.args.strategy if strategy is None: raise ValueError('A device strategy has to be initialized before using TensorFlow.') _train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_speed(_train) def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]: if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True) strategy = self.args.strategy if strategy is None: raise ValueError('A device strategy has to be initialized before using TensorFlow.') _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_memory(_inference) def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]: if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True) strategy = self.args.strategy if strategy is None: raise ValueError('A device strategy has to be initialized before using TensorFlow.') _train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_memory(_train) def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] if self.args.fp16: raise NotImplementedError('Mixed precision is currently not supported.') has_model_class_in_config = hasattr(config, 'architectures') and isinstance(config.architectures, list) and (len(config.architectures) > 0) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = 'TF' + config.architectures[0] transformers_module = __import__('transformers', fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError(f'{model_class} does not exist. 
If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`.') else: model = TF_MODEL_MAPPING[config.__class__](config) vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size input_ids = random_input_ids(batch_size, sequence_length, vocab_size) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_decoder_forward(): return model(input_ids, decoder_input_ids=input_ids, training=False) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_forward(): return model(input_ids, training=False) _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.') if self.args.fp16: raise NotImplementedError('Mixed precision is currently not supported.') has_model_class_in_config = hasattr(config, 'architectures') and isinstance(config.architectures, list) and (len(config.architectures) > 0) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = 'TF' + config.architectures[0] transformers_module = __import__('transformers', fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError(f'{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`.') else: model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config) vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size input_ids = random_input_ids(batch_size, sequence_length, vocab_size) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_decoder_train(): loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0] gradients = tf.gradients(loss, model.trainable_variables) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_train(): loss = model(input_ids, labels=input_ids, training=True)[0] gradients = tf.gradients(loss, model.trainable_variables) return gradients _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def _measure_speed(self, func) -> float: with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: logger.info('Do inference on TPU. Running model 5 times to stabilize compilation') timeit.repeat(func, repeat=1, number=5) runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10) return min(runtimes) / 10.0 except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}") def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]: logger.info('Note that TensorFlow allocates more memory than it might need to speed up computation. The memory reported here corresponds to the memory reported by `nvidia-smi`, which can vary depending on total available memory on the GPU that is used.') with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError('`args.eager_mode` is set to `False`. 
Make sure to run model in eager mode to measure memory consumption line by line.') trace = start_memory_tracing('transformers') if self.args.is_tpu: raise NotImplementedError('Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with `args.memory=False`') elif self.args.is_gpu: if not is_py3nvml_available(): logger.warning("py3nvml not installed, we won't log GPU memory usage. Install py3nvml (pip install py3nvml) to log information about GPU.") memory = 'N/A' else: logger.info('Measuring total GPU usage on GPU device. Make sure to not have additional processes running on the same GPU.') nvml.nvmlInit() func() handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) meminfo = nvml.nvmlDeviceGetMemoryInfo(handle) max_bytes_in_use = meminfo.used memory = Memory(max_bytes_in_use) nvml.nvmlShutdown() elif self.args.trace_memory_line_by_line: logger.info('When enabling line by line tracing, the max peak memory for CPU is inaccurate in TensorFlow.') memory = None else: memory_bytes = measure_peak_memory_cpu(func) memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes if self.args.trace_memory_line_by_line: summary = stop_memory_tracing(trace) if memory is None: memory = summary.total else: summary = None return (memory, summary) except ResourceExhaustedError as e: self.print_fn(f"Doesn't fit on GPU. {e}") return ('N/A', None) # File: transformers-main/src/transformers/benchmark/benchmark_utils.py """""" import copy import csv import linecache import os import platform import sys import warnings from abc import ABC, abstractmethod from collections import defaultdict, namedtuple from datetime import datetime from multiprocessing import Pipe, Process, Queue from multiprocessing.connection import Connection from typing import Callable, Iterable, List, NamedTuple, Optional, Union from .. import AutoConfig, PretrainedConfig from .. 
import __version__ as version from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): from torch.cuda import empty_cache as torch_empty_cache if is_tf_available(): from tensorflow.python.eager import context as tf_context if is_psutil_available(): import psutil if is_py3nvml_available(): import py3nvml.py3nvml as nvml if platform.system() == 'Windows': from signal import CTRL_C_EVENT as SIGKILL else: from signal import SIGKILL logger = logging.get_logger(__name__) _is_memory_tracing_enabled = False BenchmarkOutput = namedtuple('BenchmarkOutput', ['time_inference_result', 'memory_inference_result', 'time_train_result', 'memory_train_result', 'inference_summary', 'train_summary']) def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]: def multi_process_func(*args, **kwargs): def wrapper_func(queue: Queue, *args): try: result = func(*args) except Exception as e: logger.error(e) print(e) result = 'N/A' queue.put(result) queue = Queue() p = Process(target=wrapper_func, args=[queue] + list(args)) p.start() result = queue.get() p.join() return result if do_multi_processing: logger.info(f'Function {func} is executed in its own process...') return multi_process_func else: return func def is_memory_tracing_enabled(): global _is_memory_tracing_enabled return _is_memory_tracing_enabled class Frame(NamedTuple): filename: str module: str line_number: int event: str line_text: str class UsedMemoryState(NamedTuple): frame: Frame cpu_memory: int gpu_memory: int class Memory(NamedTuple): bytes: int def __repr__(self) -> str: return str(bytes_to_mega_bytes(self.bytes)) class MemoryState(NamedTuple): frame: Frame cpu: Memory gpu: Memory cpu_gpu: Memory class MemorySummary(NamedTuple): sequential: List[MemoryState] cumulative: List[MemoryState] current: List[MemoryState] total: Memory MemoryTrace = List[UsedMemoryState] def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int: def get_cpu_memory(process_id: int) -> int: process = psutil.Process(process_id) try: meminfo_attr = 'memory_info' if hasattr(process, 'memory_info') else 'get_memory_info' memory = getattr(process, meminfo_attr)()[0] except psutil.AccessDenied: raise ValueError('Error with Psutil.') return memory if not is_psutil_available(): logger.warning("Psutil not installed, we won't log CPU memory usage. 
Install Psutil (pip install psutil) to use CPU memory tracing.") max_memory = 'N/A' else: class MemoryMeasureProcess(Process): def __init__(self, process_id: int, child_connection: Connection, interval: float): super().__init__() self.process_id = process_id self.interval = interval self.connection = child_connection self.num_measurements = 1 self.mem_usage = get_cpu_memory(self.process_id) def run(self): self.connection.send(0) stop = False while True: self.mem_usage = max(self.mem_usage, get_cpu_memory(self.process_id)) self.num_measurements += 1 if stop: break stop = self.connection.poll(self.interval) self.connection.send(self.mem_usage) self.connection.send(self.num_measurements) while True: (child_connection, parent_connection) = Pipe() mem_process = MemoryMeasureProcess(os.getpid(), child_connection, interval) mem_process.start() parent_connection.recv() try: function() parent_connection.send(0) max_memory = parent_connection.recv() num_measurements = parent_connection.recv() except Exception: parent = psutil.Process(os.getpid()) for child in parent.children(recursive=True): os.kill(child.pid, SIGKILL) mem_process.join(0) raise RuntimeError('Process killed. Error in Process') mem_process.join(20 * interval) if num_measurements > 4 or interval < 1e-06: break interval /= 10 return max_memory def start_memory_tracing(modules_to_trace: Optional[Union[str, Iterable[str]]]=None, modules_not_to_trace: Optional[Union[str, Iterable[str]]]=None, events_to_trace: str='line', gpus_to_trace: Optional[List[int]]=None) -> MemoryTrace: if is_psutil_available(): process = psutil.Process(os.getpid()) else: logger.warning("Psutil not installed, we won't log CPU memory usage. Install psutil (pip install psutil) to use CPU memory tracing.") process = None if is_py3nvml_available(): try: nvml.nvmlInit() devices = list(range(nvml.nvmlDeviceGetCount())) if gpus_to_trace is None else gpus_to_trace nvml.nvmlShutdown() except (OSError, nvml.NVMLError): logger.warning("Error while initializing communication with GPU. We won't perform GPU memory tracing.") log_gpu = False else: log_gpu = is_torch_available() or is_tf_available() else: logger.warning("py3nvml not installed, we won't log GPU memory usage. 
Install py3nvml (pip install py3nvml) to use GPU memory tracing.") log_gpu = False memory_trace = [] def traceit(frame, event, args): global _is_memory_tracing_enabled if not _is_memory_tracing_enabled: return traceit if events_to_trace is not None: if isinstance(events_to_trace, str) and event != events_to_trace: return traceit elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace: return traceit if '__name__' not in frame.f_globals: return traceit name = frame.f_globals['__name__'] if not isinstance(name, str): return traceit else: if modules_to_trace is not None: if isinstance(modules_to_trace, str) and modules_to_trace not in name: return traceit elif isinstance(modules_to_trace, (list, tuple)) and all((m not in name for m in modules_to_trace)): return traceit if modules_not_to_trace is not None: if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name: return traceit elif isinstance(modules_not_to_trace, (list, tuple)) and any((m in name for m in modules_not_to_trace)): return traceit lineno = frame.f_lineno filename = frame.f_globals['__file__'] if filename.endswith('.pyc') or filename.endswith('.pyo'): filename = filename[:-1] line = linecache.getline(filename, lineno).rstrip() traced_state = Frame(filename, name, lineno, event, line) cpu_mem = 0 if process is not None: mem = process.memory_info() cpu_mem = mem.rss gpu_mem = 0 if log_gpu: if is_torch_available(): torch_empty_cache() if is_tf_available(): tf_context.context()._clear_caches() nvml.nvmlInit() for i in devices: handle = nvml.nvmlDeviceGetHandleByIndex(i) meminfo = nvml.nvmlDeviceGetMemoryInfo(handle) gpu_mem += meminfo.used nvml.nvmlShutdown() mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem) memory_trace.append(mem_state) return traceit sys.settrace(traceit) global _is_memory_tracing_enabled _is_memory_tracing_enabled = True return memory_trace def stop_memory_tracing(memory_trace: Optional[MemoryTrace]=None, ignore_released_memory: bool=True) -> Optional[MemorySummary]: global _is_memory_tracing_enabled _is_memory_tracing_enabled = False if memory_trace is not None and len(memory_trace) > 1: memory_diff_trace = [] memory_curr_trace = [] cumulative_memory_dict = defaultdict(lambda : [0, 0, 0]) for ((frame, cpu_mem, gpu_mem), (next_frame, next_cpu_mem, next_gpu_mem)) in zip(memory_trace[:-1], memory_trace[1:]): cpu_mem_inc = next_cpu_mem - cpu_mem gpu_mem_inc = next_gpu_mem - gpu_mem cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc memory_diff_trace.append(MemoryState(frame=frame, cpu=Memory(cpu_mem_inc), gpu=Memory(gpu_mem_inc), cpu_gpu=Memory(cpu_gpu_mem_inc))) memory_curr_trace.append(MemoryState(frame=frame, cpu=Memory(next_cpu_mem), gpu=Memory(next_gpu_mem), cpu_gpu=Memory(next_gpu_mem + next_cpu_mem))) cumulative_memory_dict[frame][0] += cpu_mem_inc cumulative_memory_dict[frame][1] += gpu_mem_inc cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc cumulative_memory = sorted(cumulative_memory_dict.items(), key=lambda x: x[1][2], reverse=True) cumulative_memory = [MemoryState(frame=frame, cpu=Memory(cpu_mem_inc), gpu=Memory(gpu_mem_inc), cpu_gpu=Memory(cpu_gpu_mem_inc)) for (frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc)) in cumulative_memory] memory_curr_trace = sorted(memory_curr_trace, key=lambda x: x.cpu_gpu.bytes, reverse=True) if ignore_released_memory: total_memory = sum((max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace)) else: total_memory = sum((step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace)) total_memory = 
Memory(total_memory) return MemorySummary(sequential=memory_diff_trace, cumulative=cumulative_memory, current=memory_curr_trace, total=total_memory) return None def bytes_to_mega_bytes(memory_amount: int) -> int: return memory_amount >> 20 class Benchmark(ABC): args: BenchmarkArguments configs: PretrainedConfig framework: str def __init__(self, args: BenchmarkArguments=None, configs: PretrainedConfig=None): self.args = args if configs is None: self.config_dict = {model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names} else: self.config_dict = dict(zip(self.args.model_names, configs)) warnings.warn(f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils are deprecated in general and it is advised to use external Benchmarking libraries to benchmark Transformer models.', FutureWarning) if self.args.memory and os.getenv('TRANSFORMERS_USE_MULTIPROCESSING') == 0: logger.warning("Memory consumption will not be measured accurately if `args.multi_process` is set to `False.` The flag 'TRANSFORMERS_USE_MULTIPROCESSING' should only be disabled for debugging / testing.") self._print_fn = None self._framework_version = None self._environment_info = None @property def print_fn(self): if self._print_fn is None: if self.args.log_print: def print_and_log(*args): with open(self.args.log_filename, 'a') as log_file: log_file.write(''.join(args) + '\n') print(*args) self._print_fn = print_and_log else: self._print_fn = print return self._print_fn @property @abstractmethod def framework_version(self): pass @abstractmethod def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: pass @abstractmethod def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: pass @abstractmethod def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]: pass @abstractmethod def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]: pass def inference_speed(self, *args, **kwargs) -> float: return separate_process_wrapper_fn(self._inference_speed, self.args.do_multi_processing)(*args, **kwargs) def train_speed(self, *args, **kwargs) -> float: return separate_process_wrapper_fn(self._train_speed, self.args.do_multi_processing)(*args, **kwargs) def inference_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]: return separate_process_wrapper_fn(self._inference_memory, self.args.do_multi_processing)(*args, **kwargs) def train_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]: return separate_process_wrapper_fn(self._train_memory, self.args.do_multi_processing)(*args, **kwargs) def run(self): result_dict = {model_name: {} for model_name in self.args.model_names} inference_result_time = copy.deepcopy(result_dict) inference_result_memory = copy.deepcopy(result_dict) train_result_time = copy.deepcopy(result_dict) train_result_memory = copy.deepcopy(result_dict) for (c, model_name) in enumerate(self.args.model_names): self.print_fn(f'{c + 1} / {len(self.args.model_names)}') model_dict = {'bs': self.args.batch_sizes, 'ss': self.args.sequence_lengths, 'result': {i: {} for i in self.args.batch_sizes}} inference_result_time[model_name] = copy.deepcopy(model_dict) inference_result_memory[model_name] = copy.deepcopy(model_dict) train_result_time[model_name] = copy.deepcopy(model_dict) train_result_memory[model_name] = copy.deepcopy(model_dict) inference_summary = 
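# --- Illustrative sketch, not part of the original source: using the line-by-line memory tracer
# --- defined above directly. Requires psutil (and py3nvml plus a GPU for the GPU column); the traced
# --- module name and the workload are placeholders.
def _example_memory_tracing():
    trace = start_memory_tracing("transformers")   # only frames whose module name contains "transformers" are recorded
    # ... run the code to profile here, e.g. a model forward pass ...
    summary = stop_memory_tracing(trace)
    if summary is not None:                        # None when fewer than two frames were traced
        print(f"total memory increase: {summary.total}")        # Memory, shown in MB by __repr__
        for state in summary.cumulative[:3]:                     # top lines by cumulative cpu+gpu increase
            print(state.frame.filename, state.frame.line_number, state.cpu_gpu)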
train_summary = None for batch_size in self.args.batch_sizes: for sequence_length in self.args.sequence_lengths: if self.args.inference: if self.args.memory: (memory, inference_summary) = self.inference_memory(model_name, batch_size, sequence_length) inference_result_memory[model_name]['result'][batch_size][sequence_length] = memory if self.args.speed: time = self.inference_speed(model_name, batch_size, sequence_length) inference_result_time[model_name]['result'][batch_size][sequence_length] = time if self.args.training: if self.args.memory: (memory, train_summary) = self.train_memory(model_name, batch_size, sequence_length) train_result_memory[model_name]['result'][batch_size][sequence_length] = memory if self.args.speed: time = self.train_speed(model_name, batch_size, sequence_length) train_result_time[model_name]['result'][batch_size][sequence_length] = time if self.args.inference: if self.args.speed: self.print_fn('\n' + 20 * '=' + 'INFERENCE - SPEED - RESULT'.center(40) + 20 * '=') self.print_results(inference_result_time, type_label='Time in s') self.save_to_csv(inference_result_time, self.args.inference_time_csv_file) if self.args.is_tpu: self.print_fn('TPU was used for inference. Note that the time after compilation stabilized (after ~10 inferences model.forward(..) calls) was measured.') if self.args.memory: self.print_fn('\n' + 20 * '=' + 'INFERENCE - MEMORY - RESULT'.center(40) + 20 * '=') self.print_results(inference_result_memory, type_label='Memory in MB') self.save_to_csv(inference_result_memory, self.args.inference_memory_csv_file) if self.args.trace_memory_line_by_line: self.print_fn('\n' + 20 * '=' + 'INFERENCE - MEMOMRY - LINE BY LINE - SUMMARY'.center(40) + 20 * '=') self.print_memory_trace_statistics(inference_summary) if self.args.training: if self.args.speed: self.print_fn('\n' + 20 * '=' + 'TRAIN - SPEED - RESULTS'.center(40) + 20 * '=') self.print_results(train_result_time, 'Time in s') self.save_to_csv(train_result_time, self.args.train_time_csv_file) if self.args.is_tpu: self.print_fn('TPU was used for training. Note that the time after compilation stabilized (after ~10 train loss=model.forward(...) 
+ loss.backward() calls) was measured.') if self.args.memory: self.print_fn('\n' + 20 * '=' + 'TRAIN - MEMORY - RESULTS'.center(40) + 20 * '=') self.print_results(train_result_memory, type_label='Memory in MB') self.save_to_csv(train_result_memory, self.args.train_memory_csv_file) if self.args.trace_memory_line_by_line: self.print_fn('\n' + 20 * '=' + 'TRAIN - MEMOMRY - LINE BY LINE - SUMMARY'.center(40) + 20 * '=') self.print_memory_trace_statistics(train_summary) if self.args.env_print: self.print_fn('\n' + 20 * '=' + 'ENVIRONMENT INFORMATION'.center(40) + 20 * '=') self.print_fn('\n'.join([f'- {prop}: {val}' for (prop, val) in self.environment_info.items()]) + '\n') if self.args.save_to_csv: with open(self.args.env_info_csv_file, mode='w', newline='') as csv_file: writer = csv.writer(csv_file) for (key, value) in self.environment_info.items(): writer.writerow([key, value]) return BenchmarkOutput(inference_result_time, inference_result_memory, train_result_time, train_result_memory, inference_summary, train_summary) @property def environment_info(self): if self._environment_info is None: info = {} info['transformers_version'] = version info['framework'] = self.framework if self.framework == 'PyTorch': info['use_torchscript'] = self.args.torchscript if self.framework == 'TensorFlow': info['eager_mode'] = self.args.eager_mode info['use_xla'] = self.args.use_xla info['framework_version'] = self.framework_version info['python_version'] = platform.python_version() info['system'] = platform.system() info['cpu'] = platform.processor() info['architecture'] = platform.architecture()[0] info['date'] = datetime.date(datetime.now()) info['time'] = datetime.time(datetime.now()) info['fp16'] = self.args.fp16 info['use_multiprocessing'] = self.args.do_multi_processing info['only_pretrain_model'] = self.args.only_pretrain_model if is_psutil_available(): info['cpu_ram_mb'] = bytes_to_mega_bytes(psutil.virtual_memory().total) else: logger.warning("Psutil not installed, we won't log available CPU memory. Install psutil (pip install psutil) to log available CPU memory.") info['cpu_ram_mb'] = 'N/A' info['use_gpu'] = self.args.is_gpu if self.args.is_gpu: info['num_gpus'] = 1 if is_py3nvml_available(): nvml.nvmlInit() handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) info['gpu'] = nvml.nvmlDeviceGetName(handle) info['gpu_ram_mb'] = bytes_to_mega_bytes(nvml.nvmlDeviceGetMemoryInfo(handle).total) info['gpu_power_watts'] = nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000 info['gpu_performance_state'] = nvml.nvmlDeviceGetPerformanceState(handle) nvml.nvmlShutdown() else: logger.warning("py3nvml not installed, we won't log GPU memory usage. 
Install py3nvml (pip install py3nvml) to log information about GPU.") info['gpu'] = 'N/A' info['gpu_ram_mb'] = 'N/A' info['gpu_power_watts'] = 'N/A' info['gpu_performance_state'] = 'N/A' info['use_tpu'] = self.args.is_tpu self._environment_info = info return self._environment_info def print_results(self, result_dict, type_label): self.print_fn(80 * '-') self.print_fn('Model Name'.center(30) + 'Batch Size'.center(15) + 'Seq Length'.center(15) + type_label.center(15)) self.print_fn(80 * '-') for model_name in self.args.model_names: for batch_size in result_dict[model_name]['bs']: for sequence_length in result_dict[model_name]['ss']: result = result_dict[model_name]['result'][batch_size][sequence_length] if isinstance(result, float): result = round(1000 * result) / 1000 result = '< 0.001' if result == 0.0 else str(result) else: result = str(result) self.print_fn(model_name[:30].center(30) + str(batch_size).center(15), str(sequence_length).center(15), result.center(15)) self.print_fn(80 * '-') def print_memory_trace_statistics(self, summary: MemorySummary): self.print_fn('\nLine by line memory consumption:\n' + '\n'.join((f'{state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}' for state in summary.sequential))) self.print_fn('\nLines with top memory consumption:\n' + '\n'.join((f'=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}' for state in summary.cumulative[:6]))) self.print_fn('\nLines with lowest memory consumption:\n' + '\n'.join((f'=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}' for state in summary.cumulative[-6:]))) self.print_fn(f'\nTotal memory increase: {summary.total}') def save_to_csv(self, result_dict, filename): if not self.args.save_to_csv: return self.print_fn('Saving results to csv.') with open(filename, mode='w') as csv_file: if len(self.args.model_names) <= 0: raise ValueError(f'At least 1 model should be defined, but got {self.model_names}') fieldnames = ['model', 'batch_size', 'sequence_length'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames + ['result']) writer.writeheader() for model_name in self.args.model_names: result_dict_model = result_dict[model_name]['result'] for bs in result_dict_model: for ss in result_dict_model[bs]: result_model = result_dict_model[bs][ss] writer.writerow({'model': model_name, 'batch_size': bs, 'sequence_length': ss, 'result': ('{}' if not isinstance(result_model, float) else '{:.4f}').format(result_model)}) # File: transformers-main/src/transformers/cache_utils.py import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch from packaging import version from .configuration_utils import PretrainedConfig from .utils import is_hqq_available, is_quanto_available, is_torchdynamo_compiling, logging if is_quanto_available(): quanto_version = version.parse(importlib.metadata.version('quanto')) if quanto_version >= version.parse('0.2.0'): from quanto import AffineQuantizer, MaxOptimizer, qint2, qint4 if is_hqq_available(): from hqq.core.quantize import Quantizer as HQQQuantizer logger = logging.get_logger(__name__) class Cache(torch.nn.Module): def __init__(self): super().__init__() def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError('Make sure to 
implement `update` in a subclass.') def get_seq_length(self, layer_idx: Optional[int]=0) -> int: raise NotImplementedError('Make sure to implement `get_seq_length` in a subclass.') def get_max_length(self) -> Optional[int]: raise NotImplementedError('Make sure to implement `get_max_length` in a subclass.') def get_usable_length(self, new_seq_length: int, layer_idx: Optional[int]=0) -> int: max_length = self.get_max_length() previous_seq_length = self.get_seq_length(layer_idx) if max_length is not None and previous_seq_length + new_seq_length > max_length: return max_length - new_seq_length return previous_seq_length def reorder_cache(self, beam_idx: torch.LongTensor): for layer_idx in range(len(self.key_cache)): device = self.key_cache[layer_idx].device self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.value_cache[layer_idx].device self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device)) @property def seen_tokens(self): logger.warning_once('The `seen_tokens` attribute is deprecated and will be removed in v4.41. Use the `cache_position` model input instead.') if hasattr(self, '_seen_tokens'): return self._seen_tokens else: return None @dataclass class CacheConfig: cache_implementation: None @classmethod def from_dict(cls, config_dict, **kwargs): config = cls(**config_dict) to_remove = [] for (key, value) in kwargs.items(): if hasattr(config, key): setattr(config, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) return config def to_json_file(self, json_file_path: Union[str, os.PathLike]): with open(json_file_path, 'w', encoding='utf-8') as writer: config_dict = self.to_dict() json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\n' writer.write(json_string) def to_dict(self) -> Dict[str, Any]: return copy.deepcopy(self.__dict__) def __iter__(self): for (attr, value) in copy.deepcopy(self.__dict__).items(): yield (attr, value) def __repr__(self): return f'{self.__class__.__name__} {self.to_json_string()}' def to_json_string(self): return json.dumps(self.__dict__, indent=2) + '\n' def update(self, **kwargs): to_remove = [] for (key, value) in kwargs.items(): if hasattr(self, key): setattr(self, key, value) to_remove.append(key) unused_kwargs = {key: value for (key, value) in kwargs.items() if key not in to_remove} return unused_kwargs @dataclass class QuantizedCacheConfig(CacheConfig): def __init__(self, backend: str='quanto', nbits: Optional[int]=4, axis_key: Optional[int]=0, axis_value: Optional[int]=0, q_group_size: Optional[int]=64, residual_length: Optional[int]=128, compute_dtype: Optional[torch.dtype]=torch.float16, device: Optional[str]='cpu'): self.backend = backend self.nbits = nbits self.axis_key = axis_key self.axis_value = axis_value self.q_group_size = q_group_size self.residual_length = residual_length self.compute_dtype = compute_dtype self.device = device def validate(self): incorrect_arg_msg = 'Some of the keys in `cache_config` are defined incorrectly. 
`{key}` should be {correct_value}` but found {found_value}' if self.nbits not in [1, 2, 3, 4, 8]: raise ValueError(incorrect_arg_msg.format(key='nbits', correct_value='2 or 4 or 8', found_value=self.nbits)) if self.q_group_size <= 0: raise ValueError(incorrect_arg_msg.format(key='q_group_size', correct_value='a positive integer', found_value=self.q_group_size)) if self.residual_length < 0: raise ValueError(incorrect_arg_msg.format(key='residual_length', correct_value='a positive integer', found_value=self.residual_length)) if self.axis_key not in [0, 1, -1]: raise ValueError(incorrect_arg_msg.format(key='axis_key', correct_value='`1` or `0`, `-1`', found_value=self.axis_key)) if self.axis_value not in [0, 1, -1]: raise ValueError(incorrect_arg_msg.format(key='axis_value', correct_value='`1` or `0` or `-1`', found_value=self.axis_value)) @dataclass class StaticCacheConfig(CacheConfig): cache_implementation = 'static' def __init__(self, batch_size: int, max_cache_len: int, device='cpu'): self.batch_size = batch_size self.max_cache_len = max_cache_len self.device = device def validate(self): incorrect_arg_msg = 'Some of the keys in `cache_config` are defined incorrectly. `{key}` should be {correct_value}` but found {found_value}' if self.batch_size <= 0: raise ValueError(incorrect_arg_msg.format(key='batch_size', correct_value='> 0', found_value=self.batch_size)) if self.max_cache_len <= 0: raise ValueError(incorrect_arg_msg.format(key='max_cache_len', correct_value='> 0', found_value=self.max_cache_len)) class DynamicCache(Cache): def __init__(self) -> None: super().__init__() self.key_cache: List[torch.Tensor] = [] self.value_cache: List[torch.Tensor] = [] self._seen_tokens = 0 def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]: if layer_idx < len(self): return (self.key_cache[layer_idx], self.value_cache[layer_idx]) else: raise KeyError(f'Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}') def __iter__(self): for layer_idx in range(len(self)): yield (self.key_cache[layer_idx], self.value_cache[layer_idx]) def __len__(self): return len(self.key_cache) def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor, torch.Tensor]: if layer_idx == 0: self._seen_tokens += key_states.shape[-2] if len(self.key_cache) <= layer_idx: self.key_cache.append(key_states) self.value_cache.append(value_states) else: self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2) return (self.key_cache[layer_idx], self.value_cache[layer_idx]) def get_seq_length(self, layer_idx: Optional[int]=0) -> int: if len(self.key_cache) <= layer_idx: return 0 return self.key_cache[layer_idx].shape[-2] def get_max_length(self) -> Optional[int]: return None def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]: legacy_cache = () for layer_idx in range(len(self)): legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx]),) return legacy_cache @classmethod def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None) -> 'DynamicCache': cache = cls() if past_key_values is not None: for layer_idx in range(len(past_key_values)): (key_states, value_states) = past_key_values[layer_idx] cache.update(key_states, value_states, layer_idx) return cache def crop(self, max_length: int): if max_length < 0: 
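# --- Illustrative sketch, not part of the original source: exercising DynamicCache with dummy
# --- tensors; the (batch, num_heads, seq_len, head_dim) shapes are placeholders.
def _example_dynamic_cache():
    import torch

    cache = DynamicCache()
    cache.update(torch.randn(1, 8, 4, 64), torch.randn(1, 8, 4, 64), layer_idx=0)   # first call stores the tensors
    cache.update(torch.randn(1, 8, 1, 64), torch.randn(1, 8, 1, 64), layer_idx=0)   # later calls concatenate along the sequence axis
    assert cache.get_seq_length(0) == 5 and cache.get_max_length() is None

    legacy = cache.to_legacy_cache()                    # tuple-of-(key, value) format used by older modeling code
    restored = DynamicCache.from_legacy_cache(legacy)
    assert len(restored) == 1                           # one layer round-tripped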
max_length = self.get_seq_length() - abs(max_length) if self.get_seq_length() <= max_length: return self._seen_tokens = max_length for idx in range(len(self.key_cache)): self.key_cache[idx] = self.key_cache[idx][..., :max_length, :] self.value_cache[idx] = self.value_cache[idx][..., :max_length, :] def batch_split(self, full_batch_size: int, split_size: int) -> List['DynamicCache']: out = [] for i in range(0, full_batch_size, split_size): current_split = DynamicCache() current_split._seen_tokens = self._seen_tokens current_split.key_cache = [tensor[i:i + split_size] for tensor in self.key_cache] current_split.value_cache = [tensor[i:i + split_size] for tensor in self.value_cache] out.append(current_split) return out @classmethod def from_batch_splits(cls, splits: List['DynamicCache']) -> 'DynamicCache': cache = cls() for idx in range(len(splits[0])): layer_keys = torch.cat([current.key_cache[idx] for current in splits], dim=0) layer_values = torch.cat([current.value_cache[idx] for current in splits], dim=0) cache.update(layer_keys, layer_values, idx) return cache def batch_repeat_interleave(self, repeats: int): for layer_idx in range(len(self)): self.key_cache[layer_idx] = self.key_cache[layer_idx].repeat_interleave(repeats, dim=0) self.value_cache[layer_idx] = self.value_cache[layer_idx].repeat_interleave(repeats, dim=0) def batch_select_indices(self, indices: torch.Tensor): for layer_idx in range(len(self)): self.key_cache[layer_idx] = self.key_cache[layer_idx][indices, ...] self.value_cache[layer_idx] = self.value_cache[layer_idx][indices, ...] class OffloadedCache(DynamicCache): def __init__(self) -> None: if not torch.cuda.is_available(): raise RuntimeError('OffloadedCache can only be used with a GPU') super().__init__() self.original_device = [] self.prefetch_stream = torch.cuda.Stream() self.beam_idx = None def prefetch_layer(self, layer_idx: int): if layer_idx < len(self): with torch.cuda.stream(self.prefetch_stream): device = self.original_device[layer_idx] self.key_cache[layer_idx] = self.key_cache[layer_idx].to(device, non_blocking=True) self.value_cache[layer_idx] = self.value_cache[layer_idx].to(device, non_blocking=True) def evict_previous_layer(self, layer_idx: int): if len(self) > 2: prev_layer_idx = (layer_idx - 1) % len(self) self.key_cache[prev_layer_idx] = self.key_cache[prev_layer_idx].to('cpu', non_blocking=True) self.value_cache[prev_layer_idx] = self.value_cache[prev_layer_idx].to('cpu', non_blocking=True) def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]: if layer_idx < len(self): torch.cuda.current_stream().synchronize() self.evict_previous_layer(layer_idx) original_device = self.original_device[layer_idx] self.prefetch_stream.synchronize() key_tensor = self.key_cache[layer_idx] value_tensor = self.value_cache[layer_idx] if self.beam_idx is not None: self.beam_idx = self.beam_idx.to(original_device) key_tensor = key_tensor.index_select(0, self.beam_idx) value_tensor = value_tensor.index_select(0, self.beam_idx) self.prefetch_layer((layer_idx + 1) % len(self)) return (key_tensor, value_tensor) else: raise KeyError(f'Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}') def reorder_cache(self, beam_idx: torch.LongTensor): del self.beam_idx self.beam_idx = beam_idx.clone() def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor, torch.Tensor]: if layer_idx == 0: self._seen_tokens += key_states.shape[-2] if 
len(self.key_cache) <= layer_idx: self.key_cache.append(key_states) self.value_cache.append(value_states) self.original_device.append(key_states.device) self.evict_previous_layer(layer_idx) else: (key_tensor, value_tensor) = self[layer_idx] self.key_cache[layer_idx] = torch.cat([key_tensor, key_states], dim=-2) self.value_cache[layer_idx] = torch.cat([value_tensor, value_states], dim=-2) return (self.key_cache[layer_idx], self.value_cache[layer_idx]) from_legacy_cache = None to_legacy_cache = None class QuantizedCache(DynamicCache): def __init__(self, cache_config: QuantizedCacheConfig) -> None: super().__init__() self._quantized_key_cache: List[torch.Tensor] = [] self._quantized_value_cache: List[torch.Tensor] = [] self.nbits = cache_config.nbits self.residual_length = cache_config.residual_length self.q_group_size = cache_config.q_group_size self.axis_key = cache_config.axis_key self.axis_value = cache_config.axis_value self.compute_dtype = cache_config.compute_dtype self.device = cache_config.device super().__init__() def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor, torch.Tensor]: if layer_idx == 0: self._seen_tokens += key_states.shape[-2] if len(self.key_cache) <= layer_idx: self._quantized_key_cache.append(self._quantize(key_states.contiguous(), axis=self.axis_key)) self._quantized_value_cache.append(self._quantize(value_states.contiguous(), axis=self.axis_value)) self.key_cache.append(torch.zeros(0, dtype=key_states.dtype, device=key_states.device)) self.value_cache.append(torch.zeros(0, dtype=key_states.dtype, device=key_states.device)) (keys_to_return, values_to_return) = (key_states, value_states) else: dequant_key = self._dequantize(self._quantized_key_cache[layer_idx]) dequant_value = self._dequantize(self._quantized_value_cache[layer_idx]) keys_to_return = [dequant_key, self.key_cache[layer_idx], key_states] values_to_return = [dequant_value, self.value_cache[layer_idx], value_states] keys_to_return = torch.cat(keys_to_return, dim=-2) values_to_return = torch.cat(values_to_return, dim=-2) if self.key_cache[layer_idx].dim() == 4 and self.key_cache[layer_idx].shape[-2] + 1 >= self.residual_length: self._quantized_key_cache[layer_idx] = self._quantize(keys_to_return.contiguous(), axis=self.axis_key) self._quantized_value_cache[layer_idx] = self._quantize(values_to_return.contiguous(), axis=self.axis_value) self.key_cache[layer_idx] = torch.zeros(0, dtype=key_states.dtype, device=key_states.device) self.value_cache[layer_idx] = torch.zeros(0, dtype=key_states.dtype, device=key_states.device) else: self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2) return (keys_to_return, values_to_return) def get_seq_length(self, layer_idx: Optional[int]=0) -> int: if len(self.key_cache) <= layer_idx: return 0 return self._seen_tokens if layer_idx == 0 else self._seen_tokens - 1 def _quantize(self, tensor, axis): raise NotImplementedError('Make sure to implement `_quantize` in a subclass.') def _dequantize(self, q_tensor): raise NotImplementedError('Make sure to implement `_dequantize` in a subclass.') class QuantoQuantizedCache(QuantizedCache): def __init__(self, cache_config: CacheConfig) -> None: super().__init__(cache_config) quanto_version = version.parse(importlib.metadata.version('quanto')) if quanto_version < version.parse('0.2.0'): raise ImportError(f'You 
need quanto package version to be greater or equal than 0.2.0 to use `QuantoQuantizedCache`. Detected version {quanto_version}. Please upgrade quanto with `pip install -U quanto`') if self.nbits not in [2, 4]: raise ValueError(f'`nbits` for `quanto` backend has to be one of [`2`, `4`] but got {self.nbits}') if self.axis_key not in [0, -1]: raise ValueError(f'`axis_key` for `quanto` backend has to be one of [`0`, `-1`] but got {self.axis_key}') if self.axis_value not in [0, -1]: raise ValueError(f'`axis_value` for `quanto` backend has to be one of [`0`, `-1`] but got {self.axis_value}') self.qtype = qint4 if self.nbits == 4 else qint2 self.optimizer = MaxOptimizer() def _quantize(self, tensor, axis): (scale, zeropoint) = self.optimizer(tensor, self.qtype.bits, axis, self.q_group_size) qtensor = AffineQuantizer.apply(tensor, self.qtype, axis, self.q_group_size, scale, zeropoint) return qtensor def _dequantize(self, qtensor): return qtensor.dequantize() class HQQQuantizedCache(QuantizedCache): def __init__(self, cache_config: CacheConfig) -> None: super().__init__(cache_config) if self.nbits not in [1, 2, 3, 4, 8]: raise ValueError(f'`nbits` for `HQQ` backend has to be one of [`1`, `2`, `3`, `4`, `8`] but got {self.nbits}') if self.axis_key not in [0, 1]: raise ValueError(f'`axis_key` for `HQQ` backend has to be one of [`0`, `1`] but got {self.axis_key}') if self.axis_value not in [0, 1]: raise ValueError(f'`axis_value` for `HQQ` backend has to be one of [`0`, `1`] but got {self.axis_value}') self.quantizer = HQQQuantizer def _quantize(self, tensor, axis): (qtensor, meta) = self.quantizer.quantize(tensor, axis=axis, device=self.device, compute_dtype=self.compute_dtype, nbits=self.nbits, group_size=self.q_group_size) meta['compute_dtype'] = self.compute_dtype self.quantizer.cuda(qtensor, meta=meta, device=self.device) return (qtensor, meta) def _dequantize(self, qtensor): (quant_tensor, meta) = qtensor tensor = self.quantizer.dequantize(quant_tensor, meta) return tensor class SinkCache(Cache): def __init__(self, window_length: int, num_sink_tokens: int) -> None: super().__init__() self.key_cache: List[torch.Tensor] = [] self.value_cache: List[torch.Tensor] = [] self.window_length = window_length self.num_sink_tokens = num_sink_tokens self.cos_sin_rerotation_cache = {} self._cos_cache = None self._sin_cache = None self._seen_tokens = 0 @staticmethod def _rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def _apply_key_rotary_pos_emb(self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor: rotated_key_states = key_states * cos + self._rotate_half(key_states) * sin return rotated_key_states def _get_rerotation_cos_sin(self, key_states: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: if key_states.shape[-2] not in self.cos_sin_rerotation_cache: cos = cos.to(torch.float32) sin = sin.to(torch.float32) original_cos = cos[self.num_sink_tokens + key_states.shape[-2]:] shifted_cos = cos[self.num_sink_tokens:-key_states.shape[-2]] original_sin = sin[self.num_sink_tokens + key_states.shape[-2]:] shifted_sin = sin[self.num_sink_tokens:-key_states.shape[-2]] rerotation_cos = original_cos * shifted_cos + original_sin * shifted_sin rerotation_sin = -original_sin * shifted_cos + original_cos * shifted_sin self.cos_sin_rerotation_cache[key_states.shape[-2]] = (rerotation_cos.to(key_states.dtype).unsqueeze(0), rerotation_sin.to(key_states.dtype).unsqueeze(0)) return 
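# --- Illustrative sketch, not part of the original source: a quanto-backed quantized KV cache handed
# --- to generation. Assumes `quanto >= 0.2.0` is installed and that the model's `generate` accepts a
# --- `Cache` instance via `past_key_values`; the model and generation kwargs are placeholders.
def _example_quantized_cache(model, input_ids):
    cache_config = QuantizedCacheConfig(backend="quanto", nbits=4, q_group_size=64, residual_length=128)
    cache_config.validate()
    cache = QuantoQuantizedCache(cache_config)
    # The most recent tokens live in the floating-point residual buffers; once a layer's residual
    # exceeds `residual_length`, everything is re-quantized (see QuantizedCache.update above).
    return model.generate(input_ids, past_key_values=cache, max_new_tokens=20)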
self.cos_sin_rerotation_cache[key_states.shape[-2]] def get_seq_length(self, layer_idx: Optional[int]=0) -> int: if len(self.key_cache) <= layer_idx: return 0 return self.key_cache[layer_idx].shape[-2] def get_max_length(self) -> Optional[int]: return self.window_length def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor, torch.Tensor]: sin = cache_kwargs.get('sin') cos = cache_kwargs.get('cos') partial_rotation_size = cache_kwargs.get('partial_rotation_size') using_rope = cos is not None and sin is not None if layer_idx == 0: self._seen_tokens += key_states.shape[-2] if using_rope and layer_idx == 0: if cos.dim() == 2: self._cos_cache = cos self._sin_cache = sin elif self._cos_cache is None: self._cos_cache = cos[0, ...] self._sin_cache = sin[0, ...] elif self._cos_cache.shape[0] < self.window_length: self._cos_cache = torch.cat([self._cos_cache, cos[0, ...]], dim=0) self._sin_cache = torch.cat([self._sin_cache, sin[0, ...]], dim=0) if len(self.key_cache) <= layer_idx: self.key_cache.append(key_states) self.value_cache.append(value_states) elif key_states.shape[-2] + self.get_seq_length(layer_idx) < self.window_length: self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2) else: keys_to_keep = self.key_cache[layer_idx][:, :, -self.window_length + self.num_sink_tokens + key_states.shape[-2]:] if using_rope: (rerotation_cos, rerotation_sin) = self._get_rerotation_cos_sin(key_states, self._cos_cache[:self.window_length], self._sin_cache[:self.window_length]) if partial_rotation_size is not None: (keys_to_keep, keys_pass) = (keys_to_keep[..., :partial_rotation_size], keys_to_keep[..., partial_rotation_size:]) keys_to_keep = self._apply_key_rotary_pos_emb(keys_to_keep, rerotation_cos, rerotation_sin) if partial_rotation_size is not None: keys_to_keep = torch.cat((keys_to_keep, keys_pass), dim=-1) sink_keys = self.key_cache[layer_idx][:, :, :self.num_sink_tokens] self.key_cache[layer_idx] = torch.cat([sink_keys, keys_to_keep, key_states], dim=-2) sink_values = self.value_cache[layer_idx][:, :, :self.num_sink_tokens] values_to_keep = self.value_cache[layer_idx][:, :, -self.window_length + self.num_sink_tokens + value_states.shape[-2]:] self.value_cache[layer_idx] = torch.cat([sink_values, values_to_keep, value_states], dim=-2) return (self.key_cache[layer_idx], self.value_cache[layer_idx]) class StaticCache(Cache): def __init__(self, config: PretrainedConfig, batch_size: int=None, max_cache_len: int=None, device: torch.device=None, dtype: torch.dtype=torch.float32, max_batch_size: Optional[int]=None, layer_device_map: Optional[Dict[int, Union[str, torch.device, int]]]=None) -> None: super().__init__() if max_batch_size is not None: logger.warning_once(f"The 'max_batch_size' argument of {self.__class__.__name__} is deprecated and will be removed in v4.46. 
Use the more precisely named 'batch_size' argument instead.") self.batch_size = batch_size or max_batch_size self.max_cache_len = config.max_position_embeddings if max_cache_len is None else max_cache_len self.head_dim = config.head_dim if hasattr(config, 'head_dim') else config.hidden_size // config.num_attention_heads self.dtype = dtype self.num_key_value_heads = config.num_attention_heads if getattr(config, 'num_key_value_heads', None) is None else config.num_key_value_heads self.key_cache: List[torch.Tensor] = [] self.value_cache: List[torch.Tensor] = [] cache_shape = (self.batch_size, self.num_key_value_heads, self.max_cache_len, self.head_dim) for idx in range(config.num_hidden_layers): if layer_device_map is not None: layer_device = layer_device_map[idx] else: layer_device = device new_layer_key_cache = torch.zeros(cache_shape, dtype=self.dtype, device=layer_device) new_layer_value_cache = torch.zeros(cache_shape, dtype=self.dtype, device=layer_device) if not is_torchdynamo_compiling(): self.register_buffer(f'key_cache_{idx}', torch.zeros(cache_shape, dtype=dtype, device=layer_device)) self.register_buffer(f'value_cache_{idx}', torch.zeros(cache_shape, dtype=dtype, device=layer_device)) new_layer_key_cache = getattr(self, f'key_cache_{idx}') new_layer_value_cache = getattr(self, f'value_cache_{idx}') torch._dynamo.mark_static_address(new_layer_key_cache) torch._dynamo.mark_static_address(new_layer_value_cache) self.key_cache.append(new_layer_key_cache) self.value_cache.append(new_layer_value_cache) def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor, torch.Tensor]: cache_position = cache_kwargs.get('cache_position') k_out = self.key_cache[layer_idx] v_out = self.value_cache[layer_idx] if cache_position is None: k_out.copy_(key_states) v_out.copy_(value_states) else: try: k_out.index_copy_(2, cache_position, key_states) v_out.index_copy_(2, cache_position, value_states) except NotImplementedError: k_out[:, :, cache_position] = key_states v_out[:, :, cache_position] = value_states return (k_out, v_out) def get_seq_length(self, layer_idx: Optional[int]=0) -> int: return self.key_cache[layer_idx][0, 0].any(dim=-1).sum() def get_max_length(self) -> Optional[int]: return self.max_cache_len def reset(self): for layer_idx in range(len(self.key_cache)): self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_() class SlidingWindowCache(StaticCache): def __init__(self, config: PretrainedConfig, batch_size: int=None, max_cache_len: int=None, device: torch.device=None, dtype: torch.dtype=torch.float32, max_batch_size: Optional[int]=None, layer_device_map: Optional[Dict[int, Union[str, torch.device, int]]]=None) -> None: super().__init__() if not hasattr(config, 'sliding_window') or config.sliding_window is None: raise ValueError("Setting `cache_implementation` to 'sliding_window' requires the model config supporting sliding window attention, please check if there is a `sliding_window` field in the model config and it's not set to None.") max_cache_len = min(config.sliding_window, max_cache_len) super().__init__(config=config, batch_size=batch_size, max_cache_len=max_cache_len, device=device, dtype=dtype, max_batch_size=max_batch_size, layer_device_map=layer_device_map) def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor]: cache_position = cache_kwargs.get('cache_position') k_out = 
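# --- Illustrative sketch, not part of the original source: pre-allocating a StaticCache from a model
# --- config and writing one decoding step into layer 0; the config object is a placeholder.
def _example_static_cache(config):
    import torch

    cache = StaticCache(config, batch_size=1, max_cache_len=128, device="cpu", dtype=torch.float32)
    head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
    num_kv_heads = getattr(config, "num_key_value_heads", None) or config.num_attention_heads

    key_states = torch.randn(1, num_kv_heads, 4, head_dim)
    value_states = torch.randn(1, num_kv_heads, 4, head_dim)
    cache_position = torch.arange(4)                 # absolute slots written in this step
    cache.update(key_states, value_states, layer_idx=0, cache_kwargs={"cache_position": cache_position})

    print(cache.get_seq_length(0))                   # number of occupied slots in layer 0 (here 4)
    print(cache.get_max_length())                    # 128, the pre-allocated capacity
    cache.reset()                                    # zero the buffers so the object can be reused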
self.key_cache[layer_idx] v_out = self.value_cache[layer_idx] if cache_position.shape[0] > self.max_cache_len: k_out = key_states[:, :, -self.max_cache_len:, :] v_out = value_states[:, :, -self.max_cache_len:, :] self.key_cache[layer_idx] += k_out self.value_cache[layer_idx] += v_out return (key_states, value_states) slicing = torch.ones(self.max_cache_len, dtype=torch.long, device=value_states.device).cumsum(0) cache_position = cache_position.clamp(0, self.max_cache_len - 1) to_shift = cache_position >= self.max_cache_len - 1 indices = (slicing + to_shift[-1].int() - 1) % self.max_cache_len k_out = k_out[:, :, indices] v_out = v_out[:, :, indices] try: k_out.index_copy_(2, cache_position, key_states) v_out.index_copy_(2, cache_position, value_states) except NotImplementedError: k_out[:, :, cache_position] = key_states v_out[:, :, cache_position] = value_states self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_() self.key_cache[layer_idx] += k_out self.value_cache[layer_idx] += v_out return (k_out, v_out) def get_max_length(self) -> Optional[int]: return None def reset(self): for layer_idx in range(len(self.key_cache)): self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_() class EncoderDecoderCache(Cache): def __init__(self, self_attention_cache: Cache, cross_attention_cache: Cache): super().__init__() self.self_attention_cache = self_attention_cache self.cross_attention_cache = cross_attention_cache self.is_updated = {} for layer_idx in range(len(cross_attention_cache.key_cache)): self.is_updated[layer_idx] = bool(cross_attention_cache.get_seq_length(layer_idx) > 0) def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]: if layer_idx < len(self): return (self.self_attention_cache.key_cache[layer_idx], self.self_attention_cache.value_cache[layer_idx], self.cross_attention_cache.key_cache[layer_idx], self.cross_attention_cache.value_cache[layer_idx]) else: raise KeyError(f'Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}') def __len__(self): return len(self.self_attention_cache) def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]: legacy_cache = () if len(self.cross_attention_cache) > 0: for (self_attn, cross_attn) in zip(self.self_attention_cache.to_legacy_cache(), self.cross_attention_cache.to_legacy_cache()): legacy_cache += (self_attn + cross_attn,) else: legacy_cache = self.self_attention_cache.to_legacy_cache() return legacy_cache @classmethod def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None) -> 'EncoderDecoderCache': cache = cls(self_attention_cache=DynamicCache(), cross_attention_cache=DynamicCache()) if past_key_values is not None: for layer_idx in range(len(past_key_values)): (key_states, value_states) = past_key_values[layer_idx][:2] cache.self_attention_cache.update(key_states, value_states, layer_idx) if len(past_key_values[layer_idx]) > 2: (key_states, value_states) = past_key_values[layer_idx][2:] cache.cross_attention_cache.update(key_states, value_states, layer_idx) cache.is_updated[layer_idx] = True return cache def get_seq_length(self, layer_idx: Optional[int]=0) -> int: if len(self.self_attention_cache.key_cache) <= layer_idx: return 0 return self.self_attention_cache.key_cache[layer_idx][0, 0].any(dim=-1).sum() def reset(self): if hasattr(self.self_attention_cache, 'reset'): self.self_attention_cache.reset() if hasattr(self.cross_attention_cache, 'reset'): self.cross_attention_cache.reset() elif not 
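# --- Illustrative sketch, not part of the original source: pairing a self-attention and a
# --- cross-attention DynamicCache for an encoder-decoder model; tensor shapes are placeholders.
def _example_encoder_decoder_cache():
    import torch

    cache = EncoderDecoderCache(self_attention_cache=DynamicCache(), cross_attention_cache=DynamicCache())
    cache.self_attention_cache.update(torch.randn(1, 8, 2, 64), torch.randn(1, 8, 2, 64), layer_idx=0)
    cache.cross_attention_cache.update(torch.randn(1, 8, 6, 64), torch.randn(1, 8, 6, 64), layer_idx=0)
    cache.is_updated[0] = True                      # cross-attention keys/values computed for layer 0

    legacy = cache.to_legacy_cache()                # (self_k, self_v, cross_k, cross_v) per layer
    roundtrip = EncoderDecoderCache.from_legacy_cache(legacy)
    assert len(roundtrip) == 1 and roundtrip.is_updated[0]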
hasattr(self.self_attention_cache, 'reset') and (not hasattr(self.cross_attention_cache, 'reset')): raise ValueError(f'Neither self nor cross-attention cache have valid `.reset()` methods. `.reset()` should only be called on compatible cache classes, such as `StaticCache` or `SlidingWindowCache`. Got {self.self_attention_cache.__str__()} for the self attention cache and {self.cross_attention_cache.__str__()} for the cross attention cache.') for layer_idx in self.is_updated: self.is_updated[layer_idx] = False def reorder_cache(self, beam_idx: torch.LongTensor): self.self_attention_cache.reorder_cache(beam_idx) self.cross_attention_cache.reorder_cache(beam_idx) def check_dynamic_cache(self, method: str): if not (isinstance(self.self_attention_cache, DynamicCache) and isinstance(self.cross_attention_cache, DynamicCache)): raise ValueError(f'`{method}` is only defined for dynamic cache, got {self.self_attention_cache.__str__()} for the self attention cache and {self.cross_attention_cache.__str__()} for the cross attention cache.') def crop(self, maximum_length: int): self.check_dynamic_cache(self.crop.__name__) self.self_attention_cache.crop(maximum_length) def batch_split(self, full_batch_size: int, split_size: int) -> 'List[EncoderDecoderCache]': self.check_dynamic_cache(self.batch_split.__name__) self_attention_cache = self.self_attention_cache.batch_split(full_batch_size, split_size) cross_attention_cache = self.cross_attention_cache.batch_split(full_batch_size, split_size) out = [] for (self_attn, cross_attn) in zip(self_attention_cache, cross_attention_cache): out.append(EncoderDecoderCache(self_attn, cross_attn)) return out @classmethod def from_batch_splits(cls, splits: List['EncoderDecoderCache']) -> 'EncoderDecoderCache': self_attention_cache = DynamicCache() cross_attention_cache = DynamicCache() for idx in range(len(splits[0])): layer_keys = torch.cat([current.self_attention_cache.key_cache[idx] for current in splits], dim=0) layer_values = torch.cat([current.self_attention_cache.value_cache[idx] for current in splits], dim=0) self_attention_cache.update(layer_keys, layer_values, idx) layer_keys = torch.cat([current.cross_attention_cache.key_cache[idx] for current in splits], dim=0) layer_values = torch.cat([current.cross_attention_cache.value_cache[idx] for current in splits], dim=0) cross_attention_cache.update(layer_keys, layer_values, idx) return cls(self_attention_cache, cross_attention_cache) def batch_repeat_interleave(self, repeats: int): self.check_dynamic_cache(self.batch_repeat_interleave.__name__) self.self_attention_cache.batch_repeat_interleave(repeats) self.cross_attention_cache.batch_repeat_interleave(repeats) def batch_select_indices(self, indices: torch.Tensor): self.check_dynamic_cache(self.batch_select_indices.__name__) self.self_attention_cache.batch_select_indices(indices) self.cross_attention_cache.batch_select_indices(indices) class HybridCache(Cache): def __init__(self, config: PretrainedConfig, batch_size: int=None, max_cache_len: int=None, device: Union[torch.device, str]='cpu', dtype: torch.dtype=torch.float32, max_batch_size: Optional[int]=None, layer_device_map: Optional[Dict[int, Union[str, torch.device, int]]]=None) -> None: super().__init__() if max_batch_size is not None: logger.warning_once(f"The 'max_batch_size' argument of {self.__class__.__name__} is deprecated and will be removed in v4.46. 
Use the more precisely named 'batch_size' argument instead.") if not hasattr(config, 'sliding_window') or config.sliding_window is None: raise ValueError("Setting `cache_implementation` to 'sliding_window' requires the model config supporting sliding window attention, please check if there is a `sliding_window` field in the model config and it's not set to None.") self.max_cache_len = max_cache_len self.batch_size = batch_size or max_batch_size self.head_dim = config.head_dim if hasattr(config, 'head_dim') else config.hidden_size // config.num_attention_heads self.dtype = dtype self.num_key_value_heads = config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads self.is_sliding = torch.tensor([not bool(i % 2) for i in range(config.num_hidden_layers)], dtype=torch.bool, device=device) self.key_cache: List[torch.Tensor] = [] self.value_cache: List[torch.Tensor] = [] global_cache_shape = (self.batch_size, self.num_key_value_heads, max_cache_len, self.head_dim) sliding_cache_shape = (self.batch_size, self.num_key_value_heads, min(config.sliding_window, max_cache_len), self.head_dim) for i in range(config.num_hidden_layers): if layer_device_map is not None: layer_device = layer_device_map[i] else: layer_device = device cache_shape = global_cache_shape if not self.is_sliding[i] else sliding_cache_shape new_layer_key_cache = torch.zeros(cache_shape, dtype=self.dtype, device=layer_device) new_layer_value_cache = torch.zeros(cache_shape, dtype=self.dtype, device=layer_device) torch._dynamo.mark_static_address(new_layer_key_cache) torch._dynamo.mark_static_address(new_layer_value_cache) self.key_cache.append(new_layer_key_cache) self.value_cache.append(new_layer_value_cache) def _sliding_update(self, cache_position, layer_idx, key_states, value_states, k_out, v_out, max_cache_len): if cache_position.shape[0] > max_cache_len: k_out = key_states[:, :, -max_cache_len:, :] v_out = value_states[:, :, -max_cache_len:, :] self.key_cache[layer_idx] += k_out self.value_cache[layer_idx] += v_out return (key_states, value_states) slicing = torch.ones(max_cache_len, dtype=torch.long, device=value_states.device).cumsum(0) cache_position = cache_position.clamp(0, max_cache_len - 1) to_shift = cache_position >= max_cache_len - 1 indices = (slicing + to_shift[-1].int() - 1) % max_cache_len k_out = k_out[:, :, indices] v_out = v_out[:, :, indices] k_out[:, :, cache_position] = key_states v_out[:, :, cache_position] = value_states self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_() self.key_cache[layer_idx] += k_out self.value_cache[layer_idx] += v_out return (k_out, v_out) def _static_update(self, cache_position, layer_idx, key_states, value_states, k_out, v_out, max_cache_len): k_out[:, :, cache_position] = key_states v_out[:, :, cache_position] = value_states self.key_cache[layer_idx] = k_out self.value_cache[layer_idx] = v_out return (k_out, v_out) def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor]: cache_position = cache_kwargs.get('cache_position') sliding_window = cache_kwargs.get('sliding_window') k_out = self.key_cache[layer_idx] v_out = self.value_cache[layer_idx] if sliding_window: update_fn = self._sliding_update else: update_fn = self._static_update return update_fn(cache_position, layer_idx, key_states, value_states, k_out, v_out, k_out.shape[2]) def get_max_length(self) -> Optional[int]: return self.max_cache_len def get_seq_length(self, 
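# --- Illustrative sketch added by the editor (not part of the original file) ---
# Rough demo of how HybridCache dispatches between the sliding-window and the
# static update, driven by the `sliding_window` entry in `cache_kwargs`.
# Assumes the HybridCache defined above is importable from transformers.cache_utils;
# the stand-in config below is a made-up object used for illustration only.
import torch
from types import SimpleNamespace
from transformers.cache_utils import HybridCache

config = SimpleNamespace(
    sliding_window=4, num_hidden_layers=2, num_attention_heads=2,
    num_key_value_heads=2, head_dim=8, hidden_size=16,
)
cache = HybridCache(config, batch_size=1, max_cache_len=8)

keys = torch.randn(1, 2, 2, 8)
values = torch.randn(1, 2, 2, 8)
pos = torch.arange(2)

# Even layers are sliding here: passing `sliding_window` selects _sliding_update.
k0, v0 = cache.update(keys, values, layer_idx=0, cache_kwargs={"cache_position": pos, "sliding_window": 4})
# Odd layers are global: no `sliding_window` -> _static_update on the full-length buffer.
k1, v1 = cache.update(keys, values, layer_idx=1, cache_kwargs={"cache_position": pos})
print(k0.shape, k1.shape)  # torch.Size([1, 2, 4, 8]) torch.Size([1, 2, 8, 8])
# --- end sketch ---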
layer_idx: Optional[int]=0): return None def reset(self): for layer_idx in range(len(self.key_cache)): self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_() class MambaCache: def __init__(self, config: PretrainedConfig, batch_size: int=None, dtype: torch.dtype=torch.float16, device: Optional[Union[torch.device, str]]=None, max_batch_size: Optional[int]=None): if max_batch_size is not None: logger.warning_once(f"The 'max_batch_size' argument of {self.__class__.__name__} is deprecated and will be removed in v4.46. Use the more precisely named 'batch_size' argument instead.") self.dtype = dtype self.batch_size = batch_size or max_batch_size self.intermediate_size = config.intermediate_size self.ssm_state_size = config.state_size self.conv_kernel_size = config.conv_kernel self.conv_states: torch.Tensor = torch.zeros(config.num_hidden_layers, self.batch_size, self.intermediate_size, self.conv_kernel_size, device=device, dtype=dtype) self.ssm_states: torch.Tensor = torch.zeros(config.num_hidden_layers, self.batch_size, self.intermediate_size, self.ssm_state_size, device=device, dtype=dtype) torch._dynamo.mark_static_address(self.conv_states) torch._dynamo.mark_static_address(self.ssm_states) def update_conv_state(self, layer_idx: int, new_conv_state: torch.Tensor, cache_position: torch.LongTensor) -> torch.Tensor: conv_state = self.conv_states[layer_idx] cache_position = cache_position.clamp(0, self.conv_kernel_size - 1) conv_state = conv_state.roll(shifts=-1, dims=-1) conv_state[:, :, cache_position] = new_conv_state.to(conv_state.device) self.conv_states[layer_idx].zero_() self.conv_states[layer_idx] += conv_state return self.conv_states[layer_idx] def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor): self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device) return self.ssm_states[layer_idx] def reset(self): self.conv_states.zero_() self.ssm_states.zero_() class OffloadedStaticCache(StaticCache): def __init__(self, config: PretrainedConfig, max_batch_size: int, max_cache_len: Optional[int], device: Union[str, torch.device], dtype: Optional[torch.dtype]=None, offload_device: Union[str, torch.device]=torch.device('cpu')) -> None: self.max_batch_size = max_batch_size self.max_cache_len = config.max_position_embeddings if max_cache_len is None else max_cache_len self.device = torch.device(device) self.offload_device = torch.device(offload_device) self.dtype = dtype if dtype is not None else torch.float32 head_dim = config.head_dim if hasattr(config, 'head_dim') else config.hidden_size // config.num_attention_heads num_key_value_heads = config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads cache_shape = (max_batch_size, num_key_value_heads, self.max_cache_len, head_dim) self.key_cache: List[torch.Tensor] = [] self.value_cache: List[torch.Tensor] = [] for i in range(config.num_hidden_layers): device = self.device if i == 0 else self.offload_device (key_cache, value_cache) = self._create_key_value_cache_tensors(cache_shape, device) self.key_cache.append(key_cache) self.value_cache.append(value_cache) self._device_key_cache: List[torch.Tensor] = [] self._device_value_cache: List[torch.Tensor] = [] for i in range(2): (key_cache, value_cache) = self._create_key_value_cache_tensors(cache_shape, self.device) self._device_key_cache.append(key_cache) self._device_value_cache.append(value_cache) self._seen_tokens = 0 self._prefetch_stream = torch.cuda.Stream() if self.device.type == 'cuda' else None def update(self, 
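# --- Illustrative sketch added by the editor (not part of the original file) ---
# Rough demo of MambaCache's rolling convolution state: the window is shifted left
# and the newest column is written at the (clamped) cache position.
# Assumes the MambaCache defined above is importable from transformers.cache_utils;
# the stand-in config is a made-up object used for illustration only.
import torch
from types import SimpleNamespace
from transformers.cache_utils import MambaCache

config = SimpleNamespace(num_hidden_layers=1, intermediate_size=2, state_size=2, conv_kernel=3)
cache = MambaCache(config, batch_size=1, dtype=torch.float32)

# Prefill: write a full (batch, intermediate_size, conv_kernel) window.
window = torch.tensor([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]])
cache.update_conv_state(0, window, cache_position=torch.arange(3))
print(cache.conv_states[0, 0].tolist())   # [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]

# Decode step: one new column; the position is clamped to the last slot of the window.
new_col = torch.tensor([[[7.0], [8.0]]])
cache.update_conv_state(0, new_col, cache_position=torch.tensor([3]))
print(cache.conv_states[0, 0].tolist())   # [[2.0, 3.0, 7.0], [5.0, 6.0, 8.0]]
# --- end sketch ---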
key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor, torch.Tensor]: if layer_idx == 0: self._seen_tokens += key_states.shape[-2] k_out = self.key_cache[0] v_out = self.value_cache[0] else: if self._prefetch_stream is not None: torch.cuda.default_stream(self.device).wait_stream(self._prefetch_stream) k_out = self._device_key_cache[layer_idx & 1] v_out = self._device_value_cache[layer_idx & 1] self._prefetch_layer(layer_idx + 1) cache_position = cache_kwargs.get('cache_position') if cache_kwargs is not None else None if cache_position is None: k_out.copy_(key_states) v_out.copy_(value_states) if layer_idx == 0: self.key_cache[layer_idx].copy_(key_states.to(self.offload_device)) self.value_cache[layer_idx].copy_(value_states.to(self.offload_device)) else: try: k_out.index_copy_(2, cache_position, key_states) v_out.index_copy_(2, cache_position, value_states) except NotImplementedError: k_out[:, :, cache_position] = key_states v_out[:, :, cache_position] = value_states if layer_idx != 0: cache_position = cache_position.to(self.offload_device) key_states = key_states.to(self.offload_device) value_states = value_states.to(self.offload_device) try: self.key_cache[layer_idx].index_copy_(2, cache_position, key_states) self.value_cache[layer_idx].index_copy_(2, cache_position, value_states) except NotImplementedError: self.key_cache[layer_idx][:, :, cache_position] = key_states self.value_cache[layer_idx][:, :, cache_position] = value_states return (k_out, v_out) def get_seq_length(self, layer_idx: Optional[int]=0) -> int: return self._seen_tokens def get_max_length(self) -> Optional[int]: return self.max_cache_len def reset(self) -> None: self._seen_tokens = 0 for layer_idx in range(len(self.key_cache)): self.key_cache[layer_idx].zero_() self.value_cache[layer_idx].zero_() @property def seen_tokens(self) -> int: return self._seen_tokens def _create_key_value_cache_tensors(self, shape: Tuple[int, ...], device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]: is_cpu_device = device == torch.device('cpu') key_cache = torch.zeros(shape, dtype=self.dtype, device=device, pin_memory=is_cpu_device) value_cache = torch.zeros(shape, dtype=self.dtype, device=device, pin_memory=is_cpu_device) torch._dynamo.mark_static_address(key_cache) torch._dynamo.mark_static_address(value_cache) return (key_cache, value_cache) def _prefetch_layer(self, layer_idx: int) -> None: if layer_idx >= len(self.key_cache): return if self._prefetch_stream is not None: with torch.cuda.stream(self._prefetch_stream): self._prefetch_layer_in_context(layer_idx) else: self._prefetch_layer_in_context(layer_idx) def _prefetch_layer_in_context(self, layer_idx: int) -> None: self._device_key_cache[layer_idx & 1].copy_(self.key_cache[layer_idx], non_blocking=True) self._device_value_cache[layer_idx & 1].copy_(self.value_cache[layer_idx], non_blocking=True) # File: transformers-main/src/transformers/commands/__init__.py from abc import ABC, abstractmethod from argparse import ArgumentParser class BaseTransformersCLICommand(ABC): @staticmethod @abstractmethod def register_subcommand(parser: ArgumentParser): raise NotImplementedError() @abstractmethod def run(self): raise NotImplementedError() # File: transformers-main/src/transformers/commands/add_new_model_like.py import difflib import json import os import re from argparse import ArgumentParser, Namespace from dataclasses import dataclass from datetime import date from itertools import chain from pathlib 
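# --- Illustrative sketch added by the editor (not part of the original file) ---
# A simplified, CPU-only illustration of the double-buffering pattern used by
# OffloadedStaticCache above: full per-layer caches live on the offload device,
# while two reusable buffers (indexed by `layer_idx & 1`) hold the layer currently
# in use and the one being prefetched. The real class overlaps the copy with
# compute on a separate CUDA stream; here everything is done synchronously.
import torch

num_layers, shape = 4, (1, 2, 8, 4)
offloaded = [torch.full(shape, float(i)) for i in range(num_layers)]   # "CPU" copies
device_buffers = [torch.empty(shape), torch.empty(shape)]              # ping-pong buffers

device_buffers[0].copy_(offloaded[0])                                  # warm up layer 0
for layer_idx in range(num_layers):
    current = device_buffers[layer_idx & 1]                            # buffer for this layer
    if layer_idx + 1 < num_layers:                                     # prefetch the next layer
        device_buffers[(layer_idx + 1) & 1].copy_(offloaded[layer_idx + 1])
    assert torch.all(current == layer_idx)                             # attention would run on `current`
# --- end sketch ---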
import Path from typing import Any, Callable, Dict, List, Optional, Pattern, Tuple, Union import yaml from ..models import auto as auto_module from ..models.auto.configuration_auto import model_type_to_module_name from ..utils import is_flax_available, is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand logger = logging.get_logger(__name__) CURRENT_YEAR = date.today().year TRANSFORMERS_PATH = Path(__file__).parent.parent REPO_PATH = TRANSFORMERS_PATH.parent.parent @dataclass class ModelPatterns: model_name: str checkpoint: str model_type: Optional[str] = None model_lower_cased: Optional[str] = None model_camel_cased: Optional[str] = None model_upper_cased: Optional[str] = None config_class: Optional[str] = None tokenizer_class: Optional[str] = None image_processor_class: Optional[str] = None feature_extractor_class: Optional[str] = None processor_class: Optional[str] = None def __post_init__(self): if self.model_type is None: self.model_type = self.model_name.lower().replace(' ', '-') if self.model_lower_cased is None: self.model_lower_cased = self.model_name.lower().replace(' ', '_').replace('-', '_') if self.model_camel_cased is None: words = self.model_name.split(' ') words = list(chain(*[w.split('-') for w in words])) words = [w[0].upper() + w[1:] for w in words] self.model_camel_cased = ''.join(words) if self.model_upper_cased is None: self.model_upper_cased = self.model_name.upper().replace(' ', '_').replace('-', '_') if self.config_class is None: self.config_class = f'{self.model_camel_cased}Config' ATTRIBUTE_TO_PLACEHOLDER = {'config_class': '[CONFIG_CLASS]', 'tokenizer_class': '[TOKENIZER_CLASS]', 'image_processor_class': '[IMAGE_PROCESSOR_CLASS]', 'feature_extractor_class': '[FEATURE_EXTRACTOR_CLASS]', 'processor_class': '[PROCESSOR_CLASS]', 'checkpoint': '[CHECKPOINT]', 'model_type': '[MODEL_TYPE]', 'model_upper_cased': '[MODEL_UPPER_CASED]', 'model_camel_cased': '[MODEL_CAMELCASED]', 'model_lower_cased': '[MODEL_LOWER_CASED]', 'model_name': '[MODEL_NAME]'} def is_empty_line(line: str) -> bool: return len(line) == 0 or line.isspace() def find_indent(line: str) -> int: search = re.search('^(\\s*)(?:\\S|$)', line) if search is None: return 0 return len(search.groups()[0]) def parse_module_content(content: str) -> List[str]: objects = [] current_object = [] lines = content.split('\n') end_markers = [')', ']', '}', '"""'] for line in lines: is_valid_object = len(current_object) > 0 if is_valid_object and len(current_object) == 1: is_valid_object = not current_object[0].startswith('# Copied from') if not is_empty_line(line) and find_indent(line) == 0 and is_valid_object: if line in end_markers: current_object.append(line) objects.append('\n'.join(current_object)) current_object = [] else: objects.append('\n'.join(current_object)) current_object = [line] else: current_object.append(line) if len(current_object) > 0: objects.append('\n'.join(current_object)) return objects def extract_block(content: str, indent_level: int=0) -> str: current_object = [] lines = content.split('\n') end_markers = [')', ']', '}', '"""'] for (idx, line) in enumerate(lines): if idx == 0 and indent_level > 0 and (not is_empty_line(line)) and (find_indent(line) != indent_level): raise ValueError(f'When `indent_level > 0`, the first line in `content` should have indent level {indent_level}. 
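# --- Illustrative sketch added by the editor (not part of the original file) ---
# How ModelPatterns derives the casings it substitutes, assuming the dataclass above
# is importable from transformers.commands.add_new_model_like; the model name and
# checkpoint below are made up.
from transformers.commands.add_new_model_like import ModelPatterns

p = ModelPatterns("GPT New", checkpoint="hypothetical/gpt-new-base")
print(p.model_type)         # gpt-new
print(p.model_lower_cased)  # gpt_new
print(p.model_camel_cased)  # GPTNew
print(p.model_upper_cased)  # GPT_NEW
print(p.config_class)       # GPTNewConfig
# --- end sketch ---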
Got {find_indent(line)} instead.') if find_indent(line) < indent_level and (not is_empty_line(line)): break is_valid_object = len(current_object) > 0 if not is_empty_line(line) and (not line.endswith(':')) and (find_indent(line) == indent_level) and is_valid_object: if line.lstrip() in end_markers: current_object.append(line) return '\n'.join(current_object) else: current_object.append(line) if len(current_object) > 0: return '\n'.join(current_object) def add_content_to_text(text: str, content: str, add_after: Optional[Union[str, Pattern]]=None, add_before: Optional[Union[str, Pattern]]=None, exact_match: bool=False) -> str: if add_after is None and add_before is None: raise ValueError('You need to pass either `add_after` or `add_before`') if add_after is not None and add_before is not None: raise ValueError("You can't pass both `add_after` or `add_before`") pattern = add_after if add_before is None else add_before def this_is_the_line(line): if isinstance(pattern, Pattern): return pattern.search(line) is not None elif exact_match: return pattern == line else: return pattern in line new_lines = [] for line in text.split('\n'): if this_is_the_line(line): if add_before is not None: new_lines.append(content) new_lines.append(line) if add_after is not None: new_lines.append(content) else: new_lines.append(line) return '\n'.join(new_lines) def add_content_to_file(file_name: Union[str, os.PathLike], content: str, add_after: Optional[Union[str, Pattern]]=None, add_before: Optional[Union[str, Pattern]]=None, exact_match: bool=False): with open(file_name, 'r', encoding='utf-8') as f: old_content = f.read() new_content = add_content_to_text(old_content, content, add_after=add_after, add_before=add_before, exact_match=exact_match) with open(file_name, 'w', encoding='utf-8') as f: f.write(new_content) def replace_model_patterns(text: str, old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns) -> Tuple[str, str]: attributes_to_check = ['config_class'] for attr in ['tokenizer_class', 'image_processor_class', 'feature_extractor_class', 'processor_class']: if getattr(old_model_patterns, attr) is not None and getattr(new_model_patterns, attr) is not None: attributes_to_check.append(attr) if old_model_patterns.checkpoint not in [old_model_patterns.model_type, old_model_patterns.model_lower_cased]: attributes_to_check.append('checkpoint') if old_model_patterns.model_type != old_model_patterns.model_lower_cased: attributes_to_check.append('model_type') else: text = re.sub(f'(\\s*)model_type = "{old_model_patterns.model_type}"', '\\1model_type = "[MODEL_TYPE]"', text) if old_model_patterns.model_upper_cased == old_model_patterns.model_camel_cased: old_model_value = old_model_patterns.model_upper_cased if re.search(f'{old_model_value}_[A-Z_]*[^A-Z_]', text) is not None: text = re.sub(f'{old_model_value}([A-Z_]*)([^a-zA-Z_])', '[MODEL_UPPER_CASED]\\1\\2', text) else: attributes_to_check.append('model_upper_cased') attributes_to_check.extend(['model_camel_cased', 'model_lower_cased', 'model_name']) for attr in attributes_to_check: text = text.replace(getattr(old_model_patterns, attr), ATTRIBUTE_TO_PLACEHOLDER[attr]) replacements = [] for (attr, placeholder) in ATTRIBUTE_TO_PLACEHOLDER.items(): if placeholder in text: replacements.append((getattr(old_model_patterns, attr), getattr(new_model_patterns, attr))) text = text.replace(placeholder, getattr(new_model_patterns, attr)) old_replacement_values = [old for (old, new) in replacements] if len(set(old_replacement_values)) != 
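# --- Illustrative sketch added by the editor (not part of the original file) ---
# Minimal demo of add_content_to_text, assuming the helper above is importable from
# transformers.commands.add_new_model_like; the lines below are made up.
from transformers.commands.add_new_model_like import add_content_to_text

text = "from .bert import BertModel\nfrom .gpt2 import GPT2Model"
new_text = add_content_to_text(text, "from .gpt_new import GPTNewModel", add_after="from .gpt2 import GPT2Model")
print(new_text)
# from .bert import BertModel
# from .gpt2 import GPT2Model
# from .gpt_new import GPTNewModel
# --- end sketch ---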
len(old_replacement_values): return (text, '') replacements = simplify_replacements(replacements) replacements = [f'{old}->{new}' for (old, new) in replacements] return (text, ','.join(replacements)) def simplify_replacements(replacements): if len(replacements) <= 1: return replacements replacements.sort(key=lambda x: len(x[0])) idx = 0 while idx < len(replacements): (old, new) = replacements[idx] j = idx + 1 while j < len(replacements): (old_2, new_2) = replacements[j] if old_2.replace(old, new) == new_2: replacements.pop(j) else: j += 1 idx += 1 return replacements def get_module_from_file(module_file: Union[str, os.PathLike]) -> str: full_module_path = Path(module_file).absolute() module_parts = full_module_path.with_suffix('').parts idx = len(module_parts) - 1 while idx >= 0 and module_parts[idx] != 'transformers': idx -= 1 if idx < 0: raise ValueError(f'{module_file} is not a transformers module.') return '.'.join(module_parts[idx:]) SPECIAL_PATTERNS = {'_CHECKPOINT_FOR_DOC =': 'checkpoint', '_CONFIG_FOR_DOC =': 'config_class', '_TOKENIZER_FOR_DOC =': 'tokenizer_class', '_IMAGE_PROCESSOR_FOR_DOC =': 'image_processor_class', '_FEAT_EXTRACTOR_FOR_DOC =': 'feature_extractor_class', '_PROCESSOR_FOR_DOC =': 'processor_class'} _re_class_func = re.compile('^(?:class|def)\\s+([^\\s:\\(]+)\\s*(?:\\(|\\:)', flags=re.MULTILINE) def remove_attributes(obj, target_attr): lines = obj.split(os.linesep) target_idx = None for (idx, line) in enumerate(lines): if line.lstrip().startswith(f'{target_attr} = '): target_idx = idx break elif line.lstrip().startswith(f'def {target_attr}('): target_idx = idx break if target_idx is None: return obj line = lines[target_idx] indent_level = find_indent(line) parsed = extract_block('\n'.join(lines[target_idx:]), indent_level) num_lines = len(parsed.split('\n')) for idx in range(num_lines): lines[target_idx + idx] = None for idx in range(target_idx - 1, -1, -1): line = lines[idx] if (line.lstrip().startswith('#') or line.lstrip().startswith('@')) and find_indent(line) == indent_level: lines[idx] = None else: break new_obj = os.linesep.join([x for x in lines if x is not None]) return new_obj def duplicate_module(module_file: Union[str, os.PathLike], old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, dest_file: Optional[str]=None, add_copied_from: bool=True, attrs_to_remove: List[str]=None): if dest_file is None: dest_file = str(module_file).replace(old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased) with open(module_file, 'r', encoding='utf-8') as f: content = f.read() content = re.sub('# Copyright (\\d+)\\s', f'# Copyright {CURRENT_YEAR} ', content) objects = parse_module_content(content) new_objects = [] for obj in objects: special_pattern = False for (pattern, attr) in SPECIAL_PATTERNS.items(): if pattern in obj: obj = obj.replace(getattr(old_model_patterns, attr), getattr(new_model_patterns, attr)) new_objects.append(obj) special_pattern = True break if special_pattern: continue old_obj = obj (obj, replacement) = replace_model_patterns(obj, old_model_patterns, new_model_patterns) has_copied_from = re.search('^#\\s+Copied from', obj, flags=re.MULTILINE) is not None if add_copied_from and (not has_copied_from) and (_re_class_func.search(obj) is not None) and (len(replacement) > 0): module_name = get_module_from_file(module_file) old_object_name = _re_class_func.search(old_obj).groups()[0] obj = add_content_to_text(obj, f'# Copied from {module_name}.{old_object_name} with {replacement}', add_before=_re_class_func) obj = 
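# --- Illustrative sketch added by the editor (not part of the original file) ---
# simplify_replacements drops replacements already implied by a shorter one, e.g.
# "BertConfig->NewBertConfig" follows from "Bert->NewBert". Assumes the helper above
# is importable from transformers.commands.add_new_model_like.
from transformers.commands.add_new_model_like import simplify_replacements

pairs = [("BertConfig", "NewBertConfig"), ("Bert", "NewBert"), ("bert", "new_bert")]
print(simplify_replacements(pairs))
# [('Bert', 'NewBert'), ('bert', 'new_bert')]
# --- end sketch ---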
re.sub('\n[ ]+# Copied from [^\n]*\n', '\n', obj) new_objects.append(obj) content = '\n'.join(new_objects) if attrs_to_remove is not None: for attr in attrs_to_remove: content = remove_attributes(content, target_attr=attr) with open(dest_file, 'w', encoding='utf-8') as f: f.write(content) def filter_framework_files(files: List[Union[str, os.PathLike]], frameworks: Optional[List[str]]=None) -> List[Union[str, os.PathLike]]: if frameworks is None: frameworks = get_default_frameworks() framework_to_file = {} others = [] for f in files: parts = Path(f).name.split('_') if 'modeling' not in parts: others.append(f) continue if 'tf' in parts: framework_to_file['tf'] = f elif 'flax' in parts: framework_to_file['flax'] = f else: framework_to_file['pt'] = f return [framework_to_file[f] for f in frameworks if f in framework_to_file] + others def get_model_files(model_type: str, frameworks: Optional[List[str]]=None) -> Dict[str, Union[Path, List[Path]]]: module_name = model_type_to_module_name(model_type) model_module = TRANSFORMERS_PATH / 'models' / module_name model_files = list(model_module.glob('*.py')) model_files = filter_framework_files(model_files, frameworks=frameworks) doc_file = REPO_PATH / 'docs' / 'source' / 'en' / 'model_doc' / f'{model_type}.md' test_files = [f'test_modeling_{module_name}.py', f'test_modeling_tf_{module_name}.py', f'test_modeling_flax_{module_name}.py', f'test_tokenization_{module_name}.py', f'test_image_processing_{module_name}.py', f'test_feature_extraction_{module_name}.py', f'test_processor_{module_name}.py'] test_files = filter_framework_files(test_files, frameworks=frameworks) test_files = [REPO_PATH / 'tests' / 'models' / module_name / f for f in test_files] test_files = [f for f in test_files if f.exists()] return {'doc_file': doc_file, 'model_files': model_files, 'module_name': module_name, 'test_files': test_files} _re_checkpoint_for_doc = re.compile('^_CHECKPOINT_FOR_DOC\\s+=\\s+(\\S*)\\s*$', flags=re.MULTILINE) def find_base_model_checkpoint(model_type: str, model_files: Optional[Dict[str, Union[Path, List[Path]]]]=None) -> str: if model_files is None: model_files = get_model_files(model_type) module_files = model_files['model_files'] for fname in module_files: if 'modeling' not in str(fname): continue with open(fname, 'r', encoding='utf-8') as f: content = f.read() if _re_checkpoint_for_doc.search(content) is not None: checkpoint = _re_checkpoint_for_doc.search(content).groups()[0] checkpoint = checkpoint.replace('"', '') checkpoint = checkpoint.replace("'", '') return checkpoint return '' def get_default_frameworks(): frameworks = [] if is_torch_available(): frameworks.append('pt') if is_tf_available(): frameworks.append('tf') if is_flax_available(): frameworks.append('flax') return frameworks _re_model_mapping = re.compile('MODEL_([A-Z_]*)MAPPING_NAMES') def retrieve_model_classes(model_type: str, frameworks: Optional[List[str]]=None) -> Dict[str, List[str]]: if frameworks is None: frameworks = get_default_frameworks() modules = {'pt': auto_module.modeling_auto if is_torch_available() else None, 'tf': auto_module.modeling_tf_auto if is_tf_available() else None, 'flax': auto_module.modeling_flax_auto if is_flax_available() else None} model_classes = {} for framework in frameworks: new_model_classes = [] if modules[framework] is None: raise ValueError(f'You selected {framework} in the frameworks, but it is not installed.') model_mappings = [attr for attr in dir(modules[framework]) if _re_model_mapping.search(attr) is not None] for model_mapping_name in 
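# --- Illustrative sketch added by the editor (not part of the original file) ---
# filter_framework_files keeps one modeling file per requested framework and passes
# every non-modeling file through. Assumes the helper above is importable from
# transformers.commands.add_new_model_like.
from transformers.commands.add_new_model_like import filter_framework_files

files = ["modeling_bert.py", "modeling_tf_bert.py", "modeling_flax_bert.py", "tokenization_bert.py"]
print(filter_framework_files(files, frameworks=["pt", "tf"]))
# ['modeling_bert.py', 'modeling_tf_bert.py', 'tokenization_bert.py']
# --- end sketch ---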
model_mappings: model_mapping = getattr(modules[framework], model_mapping_name) if model_type in model_mapping: new_model_classes.append(model_mapping[model_type]) if len(new_model_classes) > 0: model_classes[framework] = list(set(new_model_classes)) return model_classes def retrieve_info_for_model(model_type, frameworks: Optional[List[str]]=None): if model_type not in auto_module.MODEL_NAMES_MAPPING: raise ValueError(f'{model_type} is not a valid model type.') model_name = auto_module.MODEL_NAMES_MAPPING[model_type] config_class = auto_module.configuration_auto.CONFIG_MAPPING_NAMES[model_type] if model_type in auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES: tokenizer_classes = auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES[model_type] tokenizer_class = tokenizer_classes[0] if tokenizer_classes[0] is not None else tokenizer_classes[1] else: tokenizer_class = None image_processor_classes = auto_module.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES.get(model_type, None) if isinstance(image_processor_classes, tuple): image_processor_class = image_processor_classes[0] else: image_processor_class = image_processor_classes feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None) processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None) model_files = get_model_files(model_type, frameworks=frameworks) model_camel_cased = config_class.replace('Config', '') available_frameworks = [] for fname in model_files['model_files']: if 'modeling_tf' in str(fname): available_frameworks.append('tf') elif 'modeling_flax' in str(fname): available_frameworks.append('flax') elif 'modeling' in str(fname): available_frameworks.append('pt') if frameworks is None: frameworks = get_default_frameworks() frameworks = [f for f in frameworks if f in available_frameworks] model_classes = retrieve_model_classes(model_type, frameworks=frameworks) model_upper_cased = model_camel_cased.upper() model_patterns = ModelPatterns(model_name, checkpoint=find_base_model_checkpoint(model_type, model_files=model_files), model_type=model_type, model_camel_cased=model_camel_cased, model_lower_cased=model_files['module_name'], model_upper_cased=model_upper_cased, config_class=config_class, tokenizer_class=tokenizer_class, image_processor_class=image_processor_class, feature_extractor_class=feature_extractor_class, processor_class=processor_class) return {'frameworks': frameworks, 'model_classes': model_classes, 'model_files': model_files, 'model_patterns': model_patterns} def clean_frameworks_in_init(init_file: Union[str, os.PathLike], frameworks: Optional[List[str]]=None, keep_processing: bool=True): if frameworks is None: frameworks = get_default_frameworks() names = {'pt': 'torch'} to_remove = [names.get(f, f) for f in ['pt', 'tf', 'flax'] if f not in frameworks] if not keep_processing: to_remove.extend(['sentencepiece', 'tokenizers', 'vision']) if len(to_remove) == 0: return remove_pattern = '|'.join(to_remove) re_conditional_imports = re.compile(f'^\\s*if not is_({remove_pattern})_available\\(\\):\\s*$') re_try = re.compile('\\s*try:') re_else = re.compile('\\s*else:') re_is_xxx_available = re.compile(f'is_({remove_pattern})_available') with open(init_file, 'r', encoding='utf-8') as f: content = f.read() lines = content.split('\n') new_lines = [] idx = 0 while idx < len(lines): if re_conditional_imports.search(lines[idx]) is not None and re_try.search(lines[idx - 1]) is not None: new_lines.pop() idx += 1 while 
is_empty_line(lines[idx]) or re_else.search(lines[idx]) is None: idx += 1 idx += 1 indent = find_indent(lines[idx]) while find_indent(lines[idx]) >= indent or is_empty_line(lines[idx]): idx += 1 elif re_is_xxx_available.search(lines[idx]) is not None: line = lines[idx] for framework in to_remove: line = line.replace(f', is_{framework}_available', '') line = line.replace(f'is_{framework}_available, ', '') line = line.replace(f'is_{framework}_available,', '') line = line.replace(f'is_{framework}_available', '') if len(line.strip()) > 0: new_lines.append(line) idx += 1 elif keep_processing or (re.search('^\\s*"(tokenization|processing|feature_extraction|image_processing)', lines[idx]) is None and re.search('^\\s*from .(tokenization|processing|feature_extraction|image_processing)', lines[idx]) is None): new_lines.append(lines[idx]) idx += 1 else: idx += 1 with open(init_file, 'w', encoding='utf-8') as f: f.write('\n'.join(new_lines)) def add_model_to_main_init(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, frameworks: Optional[List[str]]=None, with_processing: bool=True): with open(TRANSFORMERS_PATH / '__init__.py', 'r', encoding='utf-8') as f: content = f.read() lines = content.split('\n') idx = 0 new_lines = [] framework = None while idx < len(lines): new_framework = False if not is_empty_line(lines[idx]) and find_indent(lines[idx]) == 0: framework = None elif lines[idx].lstrip().startswith('if not is_torch_available'): framework = 'pt' new_framework = True elif lines[idx].lstrip().startswith('if not is_tf_available'): framework = 'tf' new_framework = True elif lines[idx].lstrip().startswith('if not is_flax_available'): framework = 'flax' new_framework = True if new_framework: while lines[idx].strip() != 'else:': new_lines.append(lines[idx]) idx += 1 if framework is not None and frameworks is not None and (framework not in frameworks): new_lines.append(lines[idx]) idx += 1 elif re.search(f'models.{old_model_patterns.model_lower_cased}( |")', lines[idx]) is not None: block = [lines[idx]] indent = find_indent(lines[idx]) idx += 1 while find_indent(lines[idx]) > indent: block.append(lines[idx]) idx += 1 if lines[idx].strip() in [')', ']', '],']: block.append(lines[idx]) idx += 1 block = '\n'.join(block) new_lines.append(block) add_block = True if not with_processing: processing_classes = [old_model_patterns.tokenizer_class, old_model_patterns.image_processor_class, old_model_patterns.feature_extractor_class, old_model_patterns.processor_class] processing_classes = [c for c in processing_classes if c is not None] for processing_class in processing_classes: block = block.replace(f' "{processing_class}",', '') block = block.replace(f', "{processing_class}"', '') block = block.replace(f' {processing_class},', '') block = block.replace(f', {processing_class}', '') if processing_class in block: add_block = False if add_block: new_lines.append(replace_model_patterns(block, old_model_patterns, new_model_patterns)[0]) else: new_lines.append(lines[idx]) idx += 1 with open(TRANSFORMERS_PATH / '__init__.py', 'w', encoding='utf-8') as f: f.write('\n'.join(new_lines)) def insert_tokenizer_in_auto_module(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns): if old_model_patterns.tokenizer_class is None or new_model_patterns.tokenizer_class is None: return with open(TRANSFORMERS_PATH / 'models' / 'auto' / 'tokenization_auto.py', 'r', encoding='utf-8') as f: content = f.read() lines = content.split('\n') idx = 0 while not lines[idx].startswith(' TOKENIZER_MAPPING_NAMES = 
OrderedDict('): idx += 1 idx += 1 while not lines[idx].startswith('TOKENIZER_MAPPING = _LazyAutoMapping'): if lines[idx].endswith(','): block = lines[idx] else: block = [] while not lines[idx].startswith(' ),'): block.append(lines[idx]) idx += 1 block = '\n'.join(block) idx += 1 if f'"{old_model_patterns.model_type}"' in block and old_model_patterns.tokenizer_class in block: break new_block = block.replace(old_model_patterns.model_type, new_model_patterns.model_type) new_block = new_block.replace(old_model_patterns.tokenizer_class, new_model_patterns.tokenizer_class) new_lines = lines[:idx] + [new_block] + lines[idx:] with open(TRANSFORMERS_PATH / 'models' / 'auto' / 'tokenization_auto.py', 'w', encoding='utf-8') as f: f.write('\n'.join(new_lines)) AUTO_CLASSES_PATTERNS = {'configuration_auto.py': [' ("{model_type}", "{model_name}"),', ' ("{model_type}", "{config_class}"),', ' ("{model_type}", "{pretrained_archive_map}"),'], 'feature_extraction_auto.py': [' ("{model_type}", "{feature_extractor_class}"),'], 'image_processing_auto.py': [' ("{model_type}", "{image_processor_class}"),'], 'modeling_auto.py': [' ("{model_type}", "{any_pt_class}"),'], 'modeling_tf_auto.py': [' ("{model_type}", "{any_tf_class}"),'], 'modeling_flax_auto.py': [' ("{model_type}", "{any_flax_class}"),'], 'processing_auto.py': [' ("{model_type}", "{processor_class}"),']} def add_model_to_auto_classes(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, model_classes: Dict[str, List[str]]): for filename in AUTO_CLASSES_PATTERNS: new_patterns = [] for pattern in AUTO_CLASSES_PATTERNS[filename]: if re.search('any_([a-z]*)_class', pattern) is not None: framework = re.search('any_([a-z]*)_class', pattern).groups()[0] if framework in model_classes: new_patterns.extend([pattern.replace('{' + f'any_{framework}_class' + '}', cls) for cls in model_classes[framework]]) elif '{config_class}' in pattern: new_patterns.append(pattern.replace('{config_class}', old_model_patterns.config_class)) elif '{image_processor_class}' in pattern: if old_model_patterns.image_processor_class is not None and new_model_patterns.image_processor_class is not None: new_patterns.append(pattern.replace('{image_processor_class}', old_model_patterns.image_processor_class)) elif '{feature_extractor_class}' in pattern: if old_model_patterns.feature_extractor_class is not None and new_model_patterns.feature_extractor_class is not None: new_patterns.append(pattern.replace('{feature_extractor_class}', old_model_patterns.feature_extractor_class)) elif '{processor_class}' in pattern: if old_model_patterns.processor_class is not None and new_model_patterns.processor_class is not None: new_patterns.append(pattern.replace('{processor_class}', old_model_patterns.processor_class)) else: new_patterns.append(pattern) for pattern in new_patterns: full_name = TRANSFORMERS_PATH / 'models' / 'auto' / filename old_model_line = pattern new_model_line = pattern for attr in ['model_type', 'model_name']: old_model_line = old_model_line.replace('{' + attr + '}', getattr(old_model_patterns, attr)) new_model_line = new_model_line.replace('{' + attr + '}', getattr(new_model_patterns, attr)) new_model_line = new_model_line.replace(old_model_patterns.model_camel_cased, new_model_patterns.model_camel_cased) add_content_to_file(full_name, new_model_line, add_after=old_model_line) insert_tokenizer_in_auto_module(old_model_patterns, new_model_patterns) DOC_OVERVIEW_TEMPLATE = '## Overview\n\nThe {model_name} model was proposed in []() by .\n\n\nThe abstract from the paper 
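# --- Illustrative sketch added by the editor (not part of the original file) ---
# The auto-class registration above ultimately relies on add_content_to_file: the new
# model's mapping line is inserted right after the old model's line. A toy,
# self-contained version on a temporary file (the mapping lines are made up):
import tempfile
from transformers.commands.add_new_model_like import add_content_to_file

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write('("bert", "BertConfig"),\n("gpt2", "GPT2Config"),\n')
    path = f.name

add_content_to_file(path, '("gpt_new", "GPTNewConfig"),', add_after='("gpt2", "GPT2Config"),', exact_match=True)
with open(path) as f:
    print(f.read())
# ("bert", "BertConfig"),
# ("gpt2", "GPT2Config"),
# ("gpt_new", "GPTNewConfig"),
# --- end sketch ---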
is the following:\n\n**\n\nTips:\n\n\n\nThis model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/).\nThe original code can be found [here]().\n\n' def duplicate_doc_file(doc_file: Union[str, os.PathLike], old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, dest_file: Optional[Union[str, os.PathLike]]=None, frameworks: Optional[List[str]]=None): with open(doc_file, 'r', encoding='utf-8') as f: content = f.read() content = re.sub('\n' AUTOGENERATED_KERAS_COMMENT = '\n\n' TASK_TAG_TO_NAME_MAPPING = {'fill-mask': 'Masked Language Modeling', 'image-classification': 'Image Classification', 'image-segmentation': 'Image Segmentation', 'multiple-choice': 'Multiple Choice', 'object-detection': 'Object Detection', 'question-answering': 'Question Answering', 'summarization': 'Summarization', 'table-question-answering': 'Table Question Answering', 'text-classification': 'Text Classification', 'text-generation': 'Causal Language Modeling', 'text2text-generation': 'Sequence-to-sequence Language Modeling', 'token-classification': 'Token Classification', 'translation': 'Translation', 'zero-shot-classification': 'Zero Shot Classification', 'automatic-speech-recognition': 'Automatic Speech Recognition', 'audio-classification': 'Audio Classification'} METRIC_TAGS = ['accuracy', 'bleu', 'f1', 'matthews_correlation', 'pearsonr', 'precision', 'recall', 'rouge', 'sacrebleu', 'spearmanr', 'wer'] def _listify(obj): if obj is None: return [] elif isinstance(obj, str): return [obj] else: return obj def _insert_values_as_list(metadata, name, values): if values is None: return metadata if isinstance(values, str): values = [values] values = [v for v in values if v is not None] if len(values) == 0: return metadata metadata[name] = values return metadata def infer_metric_tags_from_eval_results(eval_results): if eval_results is None: return {} result = {} for key in eval_results.keys(): if key.lower().replace(' ', '_') in METRIC_TAGS: result[key.lower().replace(' ', '_')] = key elif key.lower() == 'rouge1': result['rouge'] = key return result def _insert_value(metadata, name, value): if value is None: return metadata metadata[name] = value return metadata def is_hf_dataset(dataset): if not is_datasets_available(): return False from datasets import Dataset, IterableDataset return isinstance(dataset, (Dataset, IterableDataset)) def _get_mapping_values(mapping): result = [] for v in mapping.values(): if isinstance(v, (tuple, list)): result += list(v) else: result.append(v) return result @dataclass class TrainingSummary: model_name: str language: Optional[Union[str, List[str]]] = None license: Optional[str] = None tags: Optional[Union[str, List[str]]] = None finetuned_from: Optional[str] = None tasks: Optional[Union[str, List[str]]] = None dataset: Optional[Union[str, List[str]]] = None dataset_tags: Optional[Union[str, List[str]]] = None dataset_args: Optional[Union[str, List[str]]] = None dataset_metadata: Optional[Dict[str, Any]] = None eval_results: Optional[Dict[str, float]] = None eval_lines: Optional[List[str]] = None hyperparameters: Optional[Dict[str, Any]] = None source: Optional[str] = 'trainer' def __post_init__(self): if self.license is None and (not is_offline_mode()) and (self.finetuned_from is not None) and (len(self.finetuned_from) > 0): try: info = model_info(self.finetuned_from) for tag in info.tags: if tag.startswith('license:'): self.license = tag[8:] except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, HFValidationError): pass def 
create_model_index(self, metric_mapping): model_index = {'name': self.model_name} dataset_names = _listify(self.dataset) dataset_tags = _listify(self.dataset_tags) dataset_args = _listify(self.dataset_args) dataset_metadata = _listify(self.dataset_metadata) if len(dataset_args) < len(dataset_tags): dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args)) dataset_mapping = dict(zip(dataset_tags, dataset_names)) dataset_arg_mapping = dict(zip(dataset_tags, dataset_args)) dataset_metadata_mapping = dict(zip(dataset_tags, dataset_metadata)) task_mapping = {task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if task in TASK_TAG_TO_NAME_MAPPING} model_index['results'] = [] if len(task_mapping) == 0 and len(dataset_mapping) == 0: return [model_index] if len(task_mapping) == 0: task_mapping = {None: None} if len(dataset_mapping) == 0: dataset_mapping = {None: None} all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping] for (task_tag, ds_tag) in all_possibilities: result = {} if task_tag is not None: result['task'] = {'name': task_mapping[task_tag], 'type': task_tag} if ds_tag is not None: metadata = dataset_metadata_mapping.get(ds_tag, {}) result['dataset'] = {'name': dataset_mapping[ds_tag], 'type': ds_tag, **metadata} if dataset_arg_mapping[ds_tag] is not None: result['dataset']['args'] = dataset_arg_mapping[ds_tag] if len(metric_mapping) > 0: result['metrics'] = [] for (metric_tag, metric_name) in metric_mapping.items(): result['metrics'].append({'name': metric_name, 'type': metric_tag, 'value': self.eval_results[metric_name]}) if 'task' in result and 'dataset' in result and ('metrics' in result): model_index['results'].append(result) else: logger.info(f'Dropping the following result as it does not have all the necessary fields:\n{result}') return [model_index] def create_metadata(self): metric_mapping = infer_metric_tags_from_eval_results(self.eval_results) metadata = {} metadata = _insert_value(metadata, 'library_name', 'transformers') metadata = _insert_values_as_list(metadata, 'language', self.language) metadata = _insert_value(metadata, 'license', self.license) if self.finetuned_from is not None and isinstance(self.finetuned_from, str) and (len(self.finetuned_from) > 0): metadata = _insert_value(metadata, 'base_model', self.finetuned_from) metadata = _insert_values_as_list(metadata, 'tags', self.tags) metadata = _insert_values_as_list(metadata, 'datasets', self.dataset_tags) metadata = _insert_values_as_list(metadata, 'metrics', list(metric_mapping.keys())) metadata['model-index'] = self.create_model_index(metric_mapping) return metadata def to_model_card(self): model_card = '' metadata = yaml.dump(self.create_metadata(), sort_keys=False) if len(metadata) > 0: model_card = f'---\n{metadata}---\n' if self.source == 'trainer': model_card += AUTOGENERATED_TRAINER_COMMENT else: model_card += AUTOGENERATED_KERAS_COMMENT model_card += f'\n# {self.model_name}\n\n' if self.finetuned_from is None: model_card += 'This model was trained from scratch on ' else: model_card += f'This model is a fine-tuned version of [{self.finetuned_from}](https://huggingface.co/{self.finetuned_from}) on ' if self.dataset is None: model_card += 'an unknown dataset.' elif isinstance(self.dataset, str): model_card += f'the {self.dataset} dataset.' elif isinstance(self.dataset, (tuple, list)) and len(self.dataset) == 1: model_card += f'the {self.dataset[0]} dataset.' 
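# --- Illustrative sketch added by the editor (not part of the original file) ---
# Building model-card metadata by hand, assuming the TrainingSummary above lives in
# transformers.modelcard (an assumption of this sketch); the values below are made up.
from transformers.modelcard import TrainingSummary

summary = TrainingSummary(
    model_name="demo-classifier",
    tasks="text-classification",
    dataset="imdb",
    dataset_tags="imdb",
    eval_results={"Accuracy": 0.91},
)
metadata = summary.create_metadata()
print(metadata["metrics"])                                   # ['accuracy']
print(metadata["model-index"][0]["results"][0]["metrics"])
# [{'name': 'Accuracy', 'type': 'accuracy', 'value': 0.91}]
# --- end sketch ---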
else: model_card += ', '.join([f'the {ds}' for ds in self.dataset[:-1]]) + f' and the {self.dataset[-1]} datasets.' if self.eval_results is not None: model_card += '\nIt achieves the following results on the evaluation set:\n' model_card += '\n'.join([f'- {name}: {_maybe_round(value)}' for (name, value) in self.eval_results.items()]) model_card += '\n' model_card += '\n## Model description\n\nMore information needed\n' model_card += '\n## Intended uses & limitations\n\nMore information needed\n' model_card += '\n## Training and evaluation data\n\nMore information needed\n' model_card += '\n## Training procedure\n' model_card += '\n### Training hyperparameters\n' if self.hyperparameters is not None: model_card += '\nThe following hyperparameters were used during training:\n' model_card += '\n'.join([f'- {name}: {value}' for (name, value) in self.hyperparameters.items()]) model_card += '\n' else: model_card += '\nMore information needed\n' if self.eval_lines is not None: model_card += '\n### Training results\n\n' model_card += make_markdown_table(self.eval_lines) model_card += '\n' model_card += '\n### Framework versions\n\n' model_card += f'- Transformers {__version__}\n' if self.source == 'trainer' and is_torch_available(): import torch model_card += f'- Pytorch {torch.__version__}\n' elif self.source == 'keras' and is_tf_available(): import tensorflow as tf model_card += f'- TensorFlow {tf.__version__}\n' if is_datasets_available(): import datasets model_card += f'- Datasets {datasets.__version__}\n' if is_tokenizers_available(): import tokenizers model_card += f'- Tokenizers {tokenizers.__version__}\n' return model_card @classmethod def from_trainer(cls, trainer, language=None, license=None, tags=None, model_name=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset_metadata=None, dataset=None, dataset_args=None): one_dataset = trainer.eval_dataset if trainer.eval_dataset is not None else trainer.train_dataset if is_hf_dataset(one_dataset) and (dataset_tags is None or dataset_args is None or dataset_metadata is None): default_tag = one_dataset.builder_name if default_tag not in ['csv', 'json', 'pandas', 'parquet', 'text']: if dataset_metadata is None: dataset_metadata = [{'config': one_dataset.config_name, 'split': str(one_dataset.split)}] if dataset_tags is None: dataset_tags = [default_tag] if dataset_args is None: dataset_args = [one_dataset.config_name] if dataset is None and dataset_tags is not None: dataset = dataset_tags if finetuned_from is None and hasattr(trainer.model.config, '_name_or_path') and (not os.path.isdir(trainer.model.config._name_or_path)): finetuned_from = trainer.model.config._name_or_path if tasks is None: model_class_name = trainer.model.__class__.__name__ for (task, mapping) in TASK_MAPPING.items(): if model_class_name in _get_mapping_values(mapping): tasks = task if model_name is None: model_name = Path(trainer.args.output_dir).name if len(model_name) == 0: model_name = finetuned_from if tags is None: tags = ['generated_from_trainer'] elif isinstance(tags, str) and tags != 'generated_from_trainer': tags = [tags, 'generated_from_trainer'] elif 'generated_from_trainer' not in tags: tags.append('generated_from_trainer') (_, eval_lines, eval_results) = parse_log_history(trainer.state.log_history) hyperparameters = extract_hyperparameters_from_trainer(trainer) return cls(language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset=dataset, dataset_tags=dataset_tags, dataset_args=dataset_args, 
dataset_metadata=dataset_metadata, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters) @classmethod def from_keras(cls, model, model_name, keras_history=None, language=None, license=None, tags=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset=None, dataset_args=None): if dataset is not None: if is_hf_dataset(dataset) and (dataset_tags is None or dataset_args is None): default_tag = dataset.builder_name if default_tag not in ['csv', 'json', 'pandas', 'parquet', 'text']: if dataset_tags is None: dataset_tags = [default_tag] if dataset_args is None: dataset_args = [dataset.config_name] if dataset is None and dataset_tags is not None: dataset = dataset_tags if finetuned_from is None and hasattr(model.config, '_name_or_path') and (not os.path.isdir(model.config._name_or_path)): finetuned_from = model.config._name_or_path if tasks is None: model_class_name = model.__class__.__name__ for (task, mapping) in TASK_MAPPING.items(): if model_class_name in _get_mapping_values(mapping): tasks = task if tags is None: tags = ['generated_from_keras_callback'] elif isinstance(tags, str) and tags != 'generated_from_keras_callback': tags = [tags, 'generated_from_keras_callback'] elif 'generated_from_keras_callback' not in tags: tags.append('generated_from_keras_callback') if keras_history is not None: (_, eval_lines, eval_results) = parse_keras_history(keras_history) else: eval_lines = [] eval_results = {} hyperparameters = extract_hyperparameters_from_keras(model) return cls(language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters, source='keras') def parse_keras_history(logs): if hasattr(logs, 'history'): if not hasattr(logs, 'epoch'): return (None, [], {}) logs.history['epoch'] = logs.epoch logs = logs.history else: logs = {log_key: [single_dict[log_key] for single_dict in logs] for log_key in logs[0]} lines = [] for i in range(len(logs['epoch'])): epoch_dict = {log_key: log_value_list[i] for (log_key, log_value_list) in logs.items()} values = {} for (k, v) in epoch_dict.items(): if k.startswith('val_'): k = 'validation_' + k[4:] elif k != 'epoch': k = 'train_' + k splits = k.split('_') name = ' '.join([part.capitalize() for part in splits]) values[name] = v lines.append(values) eval_results = lines[-1] return (logs, lines, eval_results) def parse_log_history(log_history): idx = 0 while idx < len(log_history) and 'train_runtime' not in log_history[idx]: idx += 1 if idx == len(log_history): idx -= 1 while idx >= 0 and 'eval_loss' not in log_history[idx]: idx -= 1 if idx >= 0: return (None, None, log_history[idx]) else: return (None, None, None) train_log = log_history[idx] lines = [] training_loss = 'No log' for i in range(idx): if 'loss' in log_history[i]: training_loss = log_history[i]['loss'] if 'eval_loss' in log_history[i]: metrics = log_history[i].copy() _ = metrics.pop('total_flos', None) epoch = metrics.pop('epoch', None) step = metrics.pop('step', None) _ = metrics.pop('eval_runtime', None) _ = metrics.pop('eval_samples_per_second', None) _ = metrics.pop('eval_steps_per_second', None) _ = metrics.pop('eval_jit_compilation_time', None) values = {'Training Loss': training_loss, 'Epoch': epoch, 'Step': step} for (k, v) in metrics.items(): if k == 'eval_loss': values['Validation Loss'] = v else: splits = k.split('_') name = ' 
'.join([part.capitalize() for part in splits[1:]]) values[name] = v lines.append(values) idx = len(log_history) - 1 while idx >= 0 and 'eval_loss' not in log_history[idx]: idx -= 1 if idx > 0: eval_results = {} for (key, value) in log_history[idx].items(): if key.startswith('eval_'): key = key[5:] if key not in ['runtime', 'samples_per_second', 'steps_per_second', 'epoch', 'step']: camel_cased_key = ' '.join([part.capitalize() for part in key.split('_')]) eval_results[camel_cased_key] = value return (train_log, lines, eval_results) else: return (train_log, lines, None) def extract_hyperparameters_from_keras(model): from .modeling_tf_utils import keras hyperparameters = {} if hasattr(model, 'optimizer') and model.optimizer is not None: hyperparameters['optimizer'] = model.optimizer.get_config() else: hyperparameters['optimizer'] = None hyperparameters['training_precision'] = keras.mixed_precision.global_policy().name return hyperparameters def _maybe_round(v, decimals=4): if isinstance(v, float) and len(str(v).split('.')) > 1 and (len(str(v).split('.')[1]) > decimals): return f'{v:.{decimals}f}' return str(v) def _regular_table_line(values, col_widths): values_with_space = [f'| {v}' + ' ' * (w - len(v) + 1) for (v, w) in zip(values, col_widths)] return ''.join(values_with_space) + '|\n' def _second_table_line(col_widths): values = ['|:' + '-' * w + ':' for w in col_widths] return ''.join(values) + '|\n' def make_markdown_table(lines): if lines is None or len(lines) == 0: return '' col_widths = {key: len(str(key)) for key in lines[0].keys()} for line in lines: for (key, value) in line.items(): if col_widths[key] < len(_maybe_round(value)): col_widths[key] = len(_maybe_round(value)) table = _regular_table_line(list(lines[0].keys()), list(col_widths.values())) table += _second_table_line(list(col_widths.values())) for line in lines: table += _regular_table_line([_maybe_round(v) for v in line.values()], list(col_widths.values())) return table _TRAINING_ARGS_KEYS = ['learning_rate', 'train_batch_size', 'eval_batch_size', 'seed'] def extract_hyperparameters_from_trainer(trainer): hyperparameters = {k: getattr(trainer.args, k) for k in _TRAINING_ARGS_KEYS} if trainer.args.parallel_mode not in [ParallelMode.NOT_PARALLEL, ParallelMode.NOT_DISTRIBUTED]: hyperparameters['distributed_type'] = 'multi-GPU' if trainer.args.parallel_mode == ParallelMode.DISTRIBUTED else trainer.args.parallel_mode.value if trainer.args.world_size > 1: hyperparameters['num_devices'] = trainer.args.world_size if trainer.args.gradient_accumulation_steps > 1: hyperparameters['gradient_accumulation_steps'] = trainer.args.gradient_accumulation_steps total_train_batch_size = trainer.args.train_batch_size * trainer.args.world_size * trainer.args.gradient_accumulation_steps if total_train_batch_size != hyperparameters['train_batch_size']: hyperparameters['total_train_batch_size'] = total_train_batch_size total_eval_batch_size = trainer.args.eval_batch_size * trainer.args.world_size if total_eval_batch_size != hyperparameters['eval_batch_size']: hyperparameters['total_eval_batch_size'] = total_eval_batch_size if trainer.args.adafactor: hyperparameters['optimizer'] = 'Adafactor' else: hyperparameters['optimizer'] = f'Adam with betas=({trainer.args.adam_beta1},{trainer.args.adam_beta2}) and epsilon={trainer.args.adam_epsilon}' hyperparameters['lr_scheduler_type'] = trainer.args.lr_scheduler_type.value if trainer.args.warmup_ratio != 0.0: hyperparameters['lr_scheduler_warmup_ratio'] = trainer.args.warmup_ratio if 
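# --- Illustrative sketch added by the editor (not part of the original file) ---
# make_markdown_table renders the per-epoch eval lines as a markdown table, rounding
# floats to four decimals. Assumes the helper above lives in transformers.modelcard
# (an assumption of this sketch); the values are made up.
from transformers.modelcard import make_markdown_table

lines = [{"Epoch": 1, "Accuracy": 0.912345}]
print(make_markdown_table(lines))
# | Epoch | Accuracy |
# |:-----:|:--------:|
# | 1     | 0.9123   |
# --- end sketch ---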
trainer.args.warmup_steps != 0.0: hyperparameters['lr_scheduler_warmup_steps'] = trainer.args.warmup_steps if trainer.args.max_steps != -1: hyperparameters['training_steps'] = trainer.args.max_steps else: hyperparameters['num_epochs'] = trainer.args.num_train_epochs if trainer.args.fp16: if trainer.use_apex: hyperparameters['mixed_precision_training'] = f'Apex, opt level {trainer.args.fp16_opt_level}' else: hyperparameters['mixed_precision_training'] = 'Native AMP' if trainer.args.label_smoothing_factor != 0.0: hyperparameters['label_smoothing_factor'] = trainer.args.label_smoothing_factor return hyperparameters # File: transformers-main/src/transformers/modeling_attn_mask_utils.py from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch from .utils.import_utils import is_torchdynamo_compiling @dataclass class AttentionMaskConverter: is_causal: bool sliding_window: int def __init__(self, is_causal: bool, sliding_window: Optional[int]=None): self.is_causal = is_causal self.sliding_window = sliding_window if self.sliding_window is not None and self.sliding_window <= 0: raise ValueError(f'Make sure that when passing `sliding_window` that its value is a strictly positive integer, not `{self.sliding_window}`') def to_causal_4d(self, batch_size: int, query_length: int, key_value_length: int, dtype: torch.dtype, device: Union[torch.device, 'str']='cpu') -> Optional[torch.Tensor]: if not self.is_causal: raise ValueError(f'Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.') input_shape = (batch_size, query_length) past_key_values_length = key_value_length - query_length causal_4d_mask = None if input_shape[-1] > 1 or self.sliding_window is not None: causal_4d_mask = self._make_causal_mask(input_shape, dtype, device=device, past_key_values_length=past_key_values_length, sliding_window=self.sliding_window) return causal_4d_mask def to_4d(self, attention_mask_2d: torch.Tensor, query_length: int, dtype: torch.dtype, key_value_length: Optional[int]=None) -> torch.Tensor: input_shape = (attention_mask_2d.shape[0], query_length) causal_4d_mask = None if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: if key_value_length is None: raise ValueError('This attention mask converter is causal. 
Make sure to pass `key_value_length` to correctly create a causal mask.') past_key_values_length = key_value_length - query_length causal_4d_mask = self._make_causal_mask(input_shape, dtype, device=attention_mask_2d.device, past_key_values_length=past_key_values_length, sliding_window=self.sliding_window) elif self.sliding_window is not None: raise NotImplementedError('Sliding window is currently only implemented for causal masking') expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to(attention_mask_2d.device) if causal_4d_mask is not None: expanded_attn_mask = causal_4d_mask.masked_fill(expanded_attn_mask.bool(), torch.finfo(dtype).min) expanded_4d_mask = expanded_attn_mask return expanded_4d_mask @staticmethod def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int=0, sliding_window: Optional[int]=None): (bsz, tgt_len) = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) if sliding_window is not None: diagonal = past_key_values_length - sliding_window - 1 context_mask = torch.tril(torch.ones_like(mask, dtype=torch.bool), diagonal=diagonal) mask.masked_fill_(context_mask, torch.finfo(dtype).min) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) @staticmethod def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None): (bsz, src_len) = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) @staticmethod def _unmask_unattended(expanded_mask: torch.FloatTensor, min_dtype: float): if expanded_mask.dtype == torch.bool: raise ValueError('AttentionMaskConverter._unmask_unattended expects a float `expanded_mask`, got a BoolTensor.') return expanded_mask.mul(~torch.all(expanded_mask == min_dtype, dim=-1, keepdim=True)) @staticmethod def _ignore_causal_mask_sdpa(attention_mask: Optional[torch.Tensor], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: Optional[int]=None, is_training: bool=False) -> bool: (_, query_length) = (inputs_embeds.shape[0], inputs_embeds.shape[1]) key_value_length = query_length + past_key_values_length is_tracing = torch.jit.is_tracing() or isinstance(inputs_embeds, torch.fx.Proxy) or is_torchdynamo_compiling() ignore_causal_mask = False if attention_mask is None: if (is_training or not is_tracing) and (query_length == 1 or key_value_length == query_length) and (sliding_window is None or key_value_length < sliding_window): ignore_causal_mask = True elif sliding_window is None or key_value_length < sliding_window: if len(attention_mask.shape) == 4: return False elif (is_training or not is_tracing) and torch.all(attention_mask == 1): if query_length == 1 or key_value_length == query_length: ignore_causal_mask = True return ignore_causal_mask def _prepare_4d_causal_attention_mask(attention_mask: Optional[torch.Tensor], input_shape: Union[torch.Size, Tuple, List], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: 
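# --- Illustrative sketch added by the editor (not part of the original file) ---
# Turning a 2D padding mask into the additive 4D causal mask consumed by the model,
# via the converter above (importable from transformers.modeling_attn_mask_utils).
import torch
from transformers.modeling_attn_mask_utils import AttentionMaskConverter

converter = AttentionMaskConverter(is_causal=True)
# Batch of 1, query length 3, with 2 cached key/value positions (key_value_length=5).
padding_mask = torch.tensor([[1, 1, 1, 1, 1]])
mask_4d = converter.to_4d(padding_mask, query_length=3, dtype=torch.float32, key_value_length=5)
print(mask_4d.shape)         # torch.Size([1, 1, 3, 5])
print((mask_4d == 0).int())  # 1 marks positions that may be attended to; masked entries hold the dtype min
# --- end sketch ---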
Optional[int]=None): attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window) key_value_length = input_shape[-1] + past_key_values_length if attention_mask is not None and len(attention_mask.shape) == 2: attention_mask = attn_mask_converter.to_4d(attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype) elif attention_mask is not None and len(attention_mask.shape) == 4: expected_shape = (input_shape[0], 1, input_shape[1], key_value_length) if tuple(attention_mask.shape) != expected_shape: raise ValueError(f'Incorrect 4D attention_mask shape: {tuple(attention_mask.shape)}; expected: {expected_shape}.') else: inverted_mask = 1.0 - attention_mask attention_mask = inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min) else: attention_mask = attn_mask_converter.to_causal_4d(input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device) return attention_mask def _prepare_4d_causal_attention_mask_for_sdpa(attention_mask: Optional[torch.Tensor], input_shape: Union[torch.Size, Tuple, List], inputs_embeds: torch.Tensor, past_key_values_length: int, sliding_window: Optional[int]=None): attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window) key_value_length = input_shape[-1] + past_key_values_length is_tracing = torch.jit.is_tracing() or isinstance(inputs_embeds, torch.fx.Proxy) or is_torchdynamo_compiling() ignore_causal_mask = AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask=attention_mask, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, sliding_window=sliding_window) if ignore_causal_mask: expanded_4d_mask = None elif attention_mask is None: expanded_4d_mask = attn_mask_converter.to_causal_4d(input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device) else: if attention_mask.dim() == 4: expanded_4d_mask = attention_mask else: expanded_4d_mask = attn_mask_converter.to_4d(attention_mask, input_shape[-1], dtype=inputs_embeds.dtype, key_value_length=key_value_length) if not is_tracing and expanded_4d_mask.device.type == 'cuda': expanded_4d_mask = AttentionMaskConverter._unmask_unattended(expanded_4d_mask, min_dtype=torch.finfo(inputs_embeds.dtype).min) return expanded_4d_mask def _prepare_4d_attention_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None): return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len) def _prepare_4d_attention_mask_for_sdpa(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None): (_, key_value_length) = mask.shape tgt_len = tgt_len if tgt_len is not None else key_value_length is_tracing = torch.jit.is_tracing() or isinstance(mask, torch.fx.Proxy) or is_torchdynamo_compiling() if not is_tracing and torch.all(mask == 1): return None else: return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len) def _create_4d_causal_attention_mask(input_shape: Union[torch.Size, Tuple, List], dtype: torch.dtype, device: torch.device, past_key_values_length: int=0, sliding_window: Optional[int]=None) -> Optional[torch.Tensor]: attn_mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=sliding_window) key_value_length = past_key_values_length + input_shape[-1] attention_mask = attn_mask_converter.to_causal_4d(input_shape[0], input_shape[-1], key_value_length, dtype=dtype, device=device) return attention_mask # File: 
transformers-main/src/transformers/modeling_flash_attention_utils.py import inspect import os from typing import Optional, Tuple import torch import torch.nn.functional as F from .utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal if is_flash_attn_2_available(): from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input from flash_attn import flash_attn_func, flash_attn_varlen_func _flash_supports_window_size = 'window_size' in list(inspect.signature(flash_attn_func).parameters) def _get_unpad_data(attention_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, int]: seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) return (indices, cu_seqlens, max_seqlen_in_batch) def _upad_input(query_layer: torch.Tensor, key_layer: torch.Tensor, value_layer: torch.Tensor, attention_mask: torch.Tensor, query_length: int): (indices_k, cu_seqlens_k, max_seqlen_in_batch_k) = _get_unpad_data(attention_mask) (batch_size, kv_seq_len, num_key_value_heads, head_dim) = key_layer.shape key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k) value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k) if query_length == kv_seq_len: query_layer = index_first_axis(query_layer.reshape(batch_size * kv_seq_len, -1, head_dim), indices_k) cu_seqlens_q = cu_seqlens_k max_seqlen_in_batch_q = max_seqlen_in_batch_k indices_q = indices_k elif query_length == 1: max_seqlen_in_batch_q = 1 cu_seqlens_q = torch.arange(batch_size + 1, dtype=torch.int32, device=query_layer.device) indices_q = cu_seqlens_q[:-1] query_layer = query_layer.squeeze(1) else: attention_mask = attention_mask[:, -query_length:] (query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q) = unpad_input(query_layer, attention_mask) return (query_layer, key_layer, value_layer, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_in_batch_q, max_seqlen_in_batch_k)) def prepare_fa2_from_position_ids(query, key, value, position_ids): query = query.view(-1, query.size(-2), query.size(-1)) key = key.view(-1, key.size(-2), key.size(-1)) value = value.view(-1, value.size(-2), value.size(-1)) position_ids = position_ids.flatten() indices_q = torch.arange(position_ids.size(0), device=position_ids.device, dtype=torch.int32) cu_seq_lens = torch.cat((indices_q[position_ids == 0], torch.tensor(position_ids.size(), device=position_ids.device, dtype=torch.int32))) max_length = position_ids.max() + 1 return (query, key, value, indices_q, (cu_seq_lens, cu_seq_lens), (max_length, max_length)) def _flash_attention_forward(query_states: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, attention_mask: torch.Tensor, query_length: int, is_causal: bool, dropout: float=0.0, position_ids: Optional[torch.Tensor]=None, softmax_scale: Optional[float]=None, sliding_window: Optional[int]=None, use_top_left_mask: bool=False, softcap: Optional[float]=None, deterministic: bool=None): if not use_top_left_mask: causal = is_causal else: causal = is_causal and query_length != 1 use_sliding_windows = _flash_supports_window_size and sliding_window is not None and (key_states.shape[1] > sliding_window) flash_kwargs = {'window_size': (sliding_window, sliding_window)} if use_sliding_windows else {} if 
is_flash_attn_greater_or_equal('2.4.1'): if deterministic is None: deterministic = os.environ.get('FLASH_ATTENTION_DETERMINISTIC', '0') == '1' flash_kwargs['deterministic'] = deterministic if softcap is not None: flash_kwargs['softcap'] = softcap if attention_mask is not None: batch_size = query_states.shape[0] (query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens) = _upad_input(query_states, key_states, value_states, attention_mask, query_length) (cu_seqlens_q, cu_seqlens_k) = cu_seq_lens (max_seqlen_in_batch_q, max_seqlen_in_batch_k) = max_seq_lens attn_output_unpad = flash_attn_varlen_func(query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, **flash_kwargs) attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) elif position_ids is not None and (not (torch.diff(position_ids, dim=-1) >= 0).all()) and (query_length != 1): batch_size = query_states.size(0) (query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens) = prepare_fa2_from_position_ids(query_states, key_states, value_states, position_ids) (cu_seqlens_q, cu_seqlens_k) = cu_seq_lens (max_seqlen_in_batch_q, max_seqlen_in_batch_k) = max_seq_lens attn_output = flash_attn_varlen_func(query_states, key_states, value_states, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_in_batch_q, max_seqlen_k=max_seqlen_in_batch_k, dropout_p=dropout, softmax_scale=softmax_scale, causal=causal, **flash_kwargs) attn_output = attn_output.view(batch_size, -1, attn_output.size(-2), attn_output.size(-1)) else: attn_output = flash_attn_func(query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal, **flash_kwargs) return attn_output # File: transformers-main/src/transformers/modeling_flax_outputs.py from typing import Dict, Optional, Tuple import flax import jax.numpy as jnp from .utils import ModelOutput @flax.struct.dataclass class FlaxBaseModelOutput(ModelOutput): last_hidden_state: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithNoAttention(ModelOutput): last_hidden_state: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithPoolingAndNoAttention(ModelOutput): last_hidden_state: jnp.ndarray = None pooler_output: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxImageClassifierOutputWithNoAttention(ModelOutput): logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithPast(ModelOutput): last_hidden_state: jnp.ndarray = None past_key_values: Optional[Dict[str, jnp.ndarray]] = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithPooling(ModelOutput): last_hidden_state: jnp.ndarray = None pooler_output: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): last_hidden_state: jnp.ndarray = None pooler_output: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None 
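# As with `hidden_states` above, the remaining cache and attention fields of these output dataclasses
# default to None and are typically only populated when the corresponding output options (and caching)
# are requested at call time.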
past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBaseModelOutputWithPastAndCrossAttentions(ModelOutput): last_hidden_state: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxSeq2SeqModelOutput(ModelOutput): last_hidden_state: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxCausalLMOutputWithCrossAttentions(ModelOutput): logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxMaskedLMOutput(ModelOutput): logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None FlaxCausalLMOutput = FlaxMaskedLMOutput @flax.struct.dataclass class FlaxSeq2SeqLMOutput(ModelOutput): logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxNextSentencePredictorOutput(ModelOutput): logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxSequenceClassifierOutput(ModelOutput): logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxSeq2SeqSequenceClassifierOutput(ModelOutput): logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxMultipleChoiceModelOutput(ModelOutput): logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxTokenClassifierOutput(ModelOutput): logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxQuestionAnsweringModelOutput(ModelOutput): start_logits: jnp.ndarray = None end_logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: 
Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxSeq2SeqQuestionAnsweringModelOutput(ModelOutput): start_logits: jnp.ndarray = None end_logits: jnp.ndarray = None past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None decoder_attentions: Optional[Tuple[jnp.ndarray]] = None cross_attentions: Optional[Tuple[jnp.ndarray]] = None encoder_last_hidden_state: Optional[jnp.ndarray] = None encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None encoder_attentions: Optional[Tuple[jnp.ndarray]] = None # File: transformers-main/src/transformers/modeling_flax_pytorch_utils.py """""" import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from . import is_safetensors_available, is_torch_available from .utils import logging if is_torch_available(): import torch if is_safetensors_available(): from safetensors import safe_open from safetensors.flax import load_file as safe_load_file logger = logging.get_logger(__name__) def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False): if not is_sharded: pt_path = os.path.abspath(pytorch_checkpoint_path) logger.info(f'Loading PyTorch weights from {pt_path}') if pt_path.endswith('.safetensors'): pt_state_dict = {} with safe_open(pt_path, framework='flax') as f: for k in f.keys(): pt_state_dict[k] = f.get_tensor(k) else: try: import torch from .pytorch_utils import is_torch_greater_or_equal_than_1_13 except (ImportError, ModuleNotFoundError): logger.error('Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. 
Please see https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions.') raise weights_only_kwarg = {'weights_only': True} if is_torch_greater_or_equal_than_1_13 else {} pt_state_dict = torch.load(pt_path, map_location='cpu', **weights_only_kwarg) logger.info(f'PyTorch checkpoint contains {sum((t.numel() for t in pt_state_dict.values())):,} parameters.') flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model) else: flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model) return flax_state_dict def rename_key_and_reshape_tensor(pt_tuple_key: Tuple[str], pt_tensor: np.ndarray, random_flax_state_dict: Dict[str, jnp.ndarray], model_prefix: str) -> (Tuple[str], np.ndarray): def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool: return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0 renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',) if pt_tuple_key[-1] in ['weight', 'gamma'] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key): return (renamed_pt_tuple_key, pt_tensor) renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',) if pt_tuple_key[-1] == 'running_mean' and (not is_key_or_prefix_key_in_dict(pt_tuple_key)): return (renamed_pt_tuple_key, pt_tensor) renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',) if pt_tuple_key[-1] == 'running_var' and (not is_key_or_prefix_key_in_dict(pt_tuple_key)): return (renamed_pt_tuple_key, pt_tensor) renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',) if pt_tuple_key[-1] == 'weight' and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key): return (renamed_pt_tuple_key, pt_tensor) renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == 'weight' and pt_tensor.ndim == 4 and (not is_key_or_prefix_key_in_dict(pt_tuple_key)): pt_tensor = pt_tensor.transpose(2, 3, 1, 0) return (renamed_pt_tuple_key, pt_tensor) renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == 'weight' and (not is_key_or_prefix_key_in_dict(pt_tuple_key)): pt_tensor = pt_tensor.T return (renamed_pt_tuple_key, pt_tensor) renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',) if pt_tuple_key[-1] == 'gamma': return (renamed_pt_tuple_key, pt_tensor) renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',) if pt_tuple_key[-1] == 'beta': return (renamed_pt_tuple_key, pt_tensor) name = None if pt_tuple_key[-3::2] == ('parametrizations', 'original0'): name = pt_tuple_key[-2] + '_g' elif pt_tuple_key[-3::2] == ('parametrizations', 'original1'): name = pt_tuple_key[-2] + '_v' if name is not None: renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,) return (renamed_pt_tuple_key, pt_tensor) return (pt_tuple_key, pt_tensor) def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model): from_bin = is_torch_available() and isinstance(next(iter(pt_state_dict.values())), torch.Tensor) bfloat16 = torch.bfloat16 if from_bin else 'bfloat16' weight_dtypes = {k: v.dtype for (k, v) in pt_state_dict.items()} if from_bin: for (k, v) in pt_state_dict.items(): if v.dtype == bfloat16: v = v.float() pt_state_dict[k] = v.numpy() model_prefix = flax_model.base_model_prefix if 'params' in flax_model.params: flax_model_params = flax_model.params['params'] else: flax_model_params = flax_model.params random_flax_state_dict = flatten_dict(flax_model_params) if 'batch_stats' in flax_model.params: flax_batch_stats = flatten_dict(flax_model.params['batch_stats']) random_flax_state_dict.update(flax_batch_stats) flax_state_dict = {} 
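# The two flags below detect a prefix mismatch between the PyTorch checkpoint and the Flax model:
# a checkpoint saved from a model with a task head being loaded into a bare base model (the
# `model_prefix` has to be stripped from each key), or conversely a base-model checkpoint being
# loaded into a model with a head (the prefix has to be prepended). For a BERT-style model the
# prefix would be "bert", e.g. turning a key such as "bert.encoder.layer.0.output.dense.weight"
# into "encoder.layer.0.output.dense.weight" (example key shown for illustration only).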
load_model_with_head_into_base_model = model_prefix not in flax_model_params and model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()} load_base_model_into_model_with_head = model_prefix in flax_model_params and model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()} for (pt_key, pt_tensor) in pt_state_dict.items(): pt_tuple_key = tuple(pt_key.split('.')) is_bfloat_16 = weight_dtypes[pt_key] == bfloat16 has_base_model_prefix = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: pt_tuple_key = pt_tuple_key[1:] (flax_key, flax_tensor) = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix) require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: flax_key = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError(f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape {random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.') if 'batch_stats' in flax_model.params: if 'mean' in flax_key[-1] or 'var' in flax_key[-1]: flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor) continue if 'num_batches_tracked' in flax_key[-1]: flax_state_dict.pop(flax_key, None) continue flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor) if not is_bfloat_16 else jnp.asarray(flax_tensor, dtype=jnp.bfloat16) else: flax_state_dict[flax_key] = jnp.asarray(flax_tensor) if not is_bfloat_16 else jnp.asarray(flax_tensor, dtype=jnp.bfloat16) return unflatten_dict(flax_state_dict) def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model): import torch from .pytorch_utils import is_torch_greater_or_equal_than_1_13 flax_state_dict = {} for shard_file in shard_filenames: weights_only_kwarg = {'weights_only': True} if is_torch_greater_or_equal_than_1_13 else {} pt_state_dict = torch.load(shard_file, **weights_only_kwarg) weight_dtypes = {k: v.dtype for (k, v) in pt_state_dict.items()} pt_state_dict = {k: v.numpy() if v.dtype != torch.bfloat16 else v.float().numpy() for (k, v) in pt_state_dict.items()} model_prefix = flax_model.base_model_prefix if 'batch_stats' in flax_model.params: flax_model_params = flax_model.params['params'] random_flax_state_dict = flatten_dict(flax_model_params) random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'])) else: flax_model_params = flax_model.params random_flax_state_dict = flatten_dict(flax_model_params) load_model_with_head_into_base_model = model_prefix not in flax_model_params and model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()} load_base_model_into_model_with_head = model_prefix in flax_model_params and model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()} for (pt_key, pt_tensor) in pt_state_dict.items(): pt_tuple_key = tuple(pt_key.split('.')) is_bfloat_16 = weight_dtypes[pt_key] == torch.bfloat16 has_base_model_prefix = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: pt_tuple_key = pt_tuple_key[1:] (flax_key, flax_tensor) = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix) require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: flax_key = (model_prefix,) + flax_key if 
flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError(f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape {random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.') if 'batch_stats' in flax_model.params: if 'mean' in flax_key[-1]: flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor) continue if 'var' in flax_key[-1]: flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor) continue if 'num_batches_tracked' in flax_key[-1]: flax_state_dict.pop(flax_key, None) continue flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor) if not is_bfloat_16 else jnp.asarray(flax_tensor, dtype=jnp.bfloat16) else: flax_state_dict[flax_key] = jnp.asarray(flax_tensor) if not is_bfloat_16 else jnp.asarray(flax_tensor, dtype=jnp.bfloat16) return unflatten_dict(flax_state_dict) def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path): flax_checkpoint_path = os.path.abspath(flax_checkpoint_path) logger.info(f'Loading Flax weights from {flax_checkpoint_path}') flax_cls = getattr(transformers, 'Flax' + model.__class__.__name__) if flax_checkpoint_path.endswith('.safetensors'): flax_state_dict = safe_load_file(flax_checkpoint_path) flax_state_dict = unflatten_dict(flax_state_dict, sep='.') else: with open(flax_checkpoint_path, 'rb') as state_f: try: flax_state_dict = from_bytes(flax_cls, state_f.read()) except UnpicklingError: raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ') return load_flax_weights_in_pytorch_model(model, flax_state_dict) def load_flax_weights_in_pytorch_model(pt_model, flax_state): try: import torch except (ImportError, ModuleNotFoundError): logger.error('Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions.') raise is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values() if any(is_type_bf16): logger.warning('Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` before loading those in PyTorch model.') flax_state = jax.tree_util.tree_map(lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state) flax_state_dict = flatten_dict(flax_state) pt_model_dict = pt_model.state_dict() load_model_with_head_into_base_model = pt_model.base_model_prefix in flax_state and pt_model.base_model_prefix not in {k.split('.')[0] for k in pt_model_dict.keys()} load_base_model_into_model_with_head = pt_model.base_model_prefix not in flax_state and pt_model.base_model_prefix in {k.split('.')[0] for k in pt_model_dict.keys()} unexpected_keys = [] missing_keys = set(pt_model_dict.keys()) for (flax_key_tuple, flax_tensor) in flax_state_dict.items(): has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix require_base_model_prefix = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict if load_model_with_head_into_base_model and has_base_model_prefix: flax_key_tuple = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple if flax_key_tuple[-1] == 'kernel' and flax_tensor.ndim == 4 and ('.'.join(flax_key_tuple) not in pt_model_dict): flax_key_tuple = flax_key_tuple[:-1] + ('weight',) flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1)) elif flax_key_tuple[-1] == 'kernel' and '.'.join(flax_key_tuple) not in pt_model_dict: flax_key_tuple = flax_key_tuple[:-1] + ('weight',) flax_tensor = flax_tensor.T elif flax_key_tuple[-1] in ['scale', 'embedding']: flax_key_tuple = flax_key_tuple[:-1] + ('weight',) elif 'mean' in flax_key_tuple[-1]: flax_key_tuple = flax_key_tuple[:-1] + ('running_mean',) elif 'var' in flax_key_tuple[-1]: flax_key_tuple = flax_key_tuple[:-1] + ('running_var',) if 'batch_stats' in flax_state: flax_key = '.'.join(flax_key_tuple[1:]) else: flax_key = '.'.join(flax_key_tuple) special_pt_names = {} for key in pt_model_dict: key_components = key.split('.') name = None if key_components[-3::2] == ['parametrizations', 'original0']: name = key_components[-2] + '_g' elif key_components[-3::2] == ['parametrizations', 'original1']: name = key_components[-2] + '_v' if name is not None: key_components = key_components[:-3] + [name] key_to_check = '.'.join(key_components) special_pt_names[key_to_check] = key if flax_key in special_pt_names: flax_key = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError(f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.') else: flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor pt_model_dict[flax_key] = torch.from_numpy(flax_tensor) missing_keys.remove(flax_key) else: unexpected_keys.append(flax_key) pt_model.load_state_dict(pt_model_dict) missing_keys = list(missing_keys) if len(unexpected_keys) > 0: logger.warning(f'Some weights of the Flax model were not used when initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect to be exactly identical (e.g. initializing a BertForSequenceClassification model from a FlaxBertForSequenceClassification model).') else: logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n') if len(missing_keys) > 0: logger.warning(f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') else: logger.warning(f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use {pt_model.__class__.__name__} for predictions without further training.') return pt_model # File: transformers-main/src/transformers/modeling_flax_utils.py import gc import json import os import re import warnings from functools import partial from pickle import UnpicklingError from typing import Any, Dict, Optional, Set, Tuple, Union import flax.linen as nn import jax import jax.numpy as jnp import msgpack.exceptions from flax.core.frozen_dict import FrozenDict, unfreeze from flax.serialization import from_bytes, to_bytes from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save from .generation import FlaxGenerationMixin, GenerationConfig from .modeling_flax_pytorch_utils import load_pytorch_checkpoint_in_flax_state_dict from .utils import FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, PushToHubMixin, add_code_sample_docstrings, add_start_docstrings_to_model_forward, cached_file, copy_func, download_url, has_file, is_offline_mode, is_remote_url, logging, replace_return_docstrings from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from .utils.import_utils import is_safetensors_available if is_safetensors_available(): from safetensors import safe_open from safetensors.flax import load_file as safe_load_file from safetensors.flax import save_file as safe_save_file logger = logging.get_logger(__name__) def quick_gelu(x): return x * jax.nn.sigmoid(1.702 * x) ACT2FN = {'gelu': partial(nn.gelu, approximate=False), 'relu': nn.relu, 'silu': nn.swish, 'swish': nn.swish, 'gelu_new': partial(nn.gelu, approximate=True), 'quick_gelu': quick_gelu, 'gelu_pytorch_tanh': partial(nn.gelu, approximate=True)} def dtype_byte_size(dtype): if dtype is bool: return 1 / 8 bit_search = re.search('[^\\d](\\d+)$', dtype.name) if bit_search is None: raise ValueError(f'`dtype` is not a valid dtype: {dtype}.') bit_size = int(bit_search.groups()[0]) return bit_size // 8 def flax_shard_checkpoint(params, max_shard_size='10GB'): max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [] current_block = {} current_block_size = 0 total_size = 0 weights = flatten_dict(params, sep='/') for item in weights: weight_size = weights[item].size * dtype_byte_size(weights[item].dtype) if current_block_size + weight_size > max_shard_size: sharded_state_dicts.append(current_block) current_block = {} current_block_size = 0 current_block[item] = weights[item] 
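# Running bookkeeping for the sharding loop: `current_block_size` tracks the shard currently being
# filled (compared against `max_shard_size` above to decide when to roll over to a new shard), while
# `total_size` accumulates the size of all weights and ends up in the index metadata as "total_size".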
current_block_size += weight_size total_size += weight_size sharded_state_dicts.append(current_block) if len(sharded_state_dicts) == 1: return ({FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None) weight_map = {} shards = {} for (idx, shard) in enumerate(sharded_state_dicts): shard_file = FLAX_WEIGHTS_NAME.replace('.msgpack', f'-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.msgpack') shards[shard_file] = shard for weight_name in shard.keys(): weight_map[weight_name] = shard_file metadata = {'total_size': total_size} index = {'metadata': metadata, 'weight_map': weight_map} return (shards, index) class FlaxPreTrainedModel(PushToHubMixin, FlaxGenerationMixin): config_class = None base_model_prefix = '' main_input_name = 'input_ids' _auto_class = None _missing_keys = set() def __init__(self, config: PretrainedConfig, module: nn.Module, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True): if config is None: raise ValueError('config cannot be None') if module is None: raise ValueError('module cannot be None') self._config = config self._module = module self.key = PRNGKey(seed) self.dtype = dtype self.input_shape = input_shape self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None self._is_initialized = _do_init if _do_init: random_params = self.init_weights(self.key, input_shape) params_shape_tree = jax.eval_shape(lambda params: params, random_params) else: init_fn = partial(self.init_weights, input_shape=input_shape) params_shape_tree = jax.eval_shape(init_fn, self.key) logger.info(f'Model weights are not initialized as `_do_init` is set to `False`. Make sure to call `{self.__class__.__name__}.init_weights` manually to initialize the weights.') self._params_shape_tree = params_shape_tree self._required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys()) if _do_init: self.params = random_params def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> Dict: raise NotImplementedError(f'init method has to be implemented for {self}') def enable_gradient_checkpointing(self): raise NotImplementedError(f'gradient checkpointing method has to be implemented for {self}') @classmethod def _from_config(cls, config, **kwargs): return cls(config, **kwargs) @property def framework(self) -> str: return 'flax' @property def config(self) -> PretrainedConfig: return self._config @property def module(self) -> nn.Module: return self._module @property def params(self) -> Union[Dict, FrozenDict]: if not self._is_initialized: raise ValueError('`params` cannot be accessed from model when the model is created with `_do_init=False`. You must call `init_weights` manually and store the params outside of the model and pass it explicitly where needed.') return self._params @property def required_params(self) -> Set: return self._required_params @property def params_shape_tree(self) -> Dict: return self._params_shape_tree @params.setter def params(self, params: Union[Dict, FrozenDict]): if not self._is_initialized: raise ValueError('`params` cannot be set from model when the model is created with `_do_init=False`. You store the params outside of the model.') if isinstance(params, FrozenDict): params = unfreeze(params) param_keys = set(flatten_dict(params).keys()) if len(self.required_params - param_keys) > 0: raise ValueError(f'Some parameters are missing. 
Make sure that `params` include the following parameters {self.required_params - param_keys}') self._params = params def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any=None) -> Any: def conditional_cast(param): if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating): param = param.astype(dtype) return param if mask is None: return jax.tree_util.tree_map(conditional_cast, params) flat_params = flatten_dict(params) (flat_mask, _) = jax.tree_util.tree_flatten(mask) for (masked, key) in zip(flat_mask, sorted(flat_params.keys())): if masked: flat_params[key] = conditional_cast(flat_params[key]) return unflatten_dict(flat_params) def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any=None): return self._cast_floating_to(params, jnp.bfloat16, mask) def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any=None): return self._cast_floating_to(params, jnp.float32, mask) def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any=None): return self._cast_floating_to(params, jnp.float16, mask) @classmethod def load_flax_weights(cls, resolved_archive_file): try: if resolved_archive_file.endswith('.safetensors'): state = safe_load_file(resolved_archive_file) state = unflatten_dict(state, sep='.') else: with open(resolved_archive_file, 'rb') as state_f: state = from_bytes(cls, state_f.read()) except (UnpicklingError, msgpack.exceptions.ExtraData) as e: try: with open(resolved_archive_file) as f: if f.read().startswith('version'): raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.') else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {resolved_archive_file} to Flax deserializable object. ') return state @classmethod def load_flax_sharded_weights(cls, shard_files): state_sharded_dict = {} for shard_file in shard_files: try: with open(shard_file, 'rb') as state_f: state = from_bytes(cls, state_f.read()) except (UnpicklingError, msgpack.exceptions.ExtraData) as e: with open(shard_file) as f: if f.read().startswith('version'): raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.') else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {shard_file} to Flax deserializable object. 
') state = flatten_dict(state, sep='/') state_sharded_dict.update(state) del state gc.collect() return unflatten_dict(state_sharded_dict, sep='/') @classmethod def can_generate(cls) -> bool: if 'GenerationMixin' in str(cls.prepare_inputs_for_generation) and 'GenerationMixin' in str(cls.generate): return False return True @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], dtype: jnp.dtype=jnp.float32, *model_args, config: Optional[Union[PretrainedConfig, str, os.PathLike]]=None, cache_dir: Optional[Union[str, os.PathLike]]=None, ignore_mismatched_sizes: bool=False, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str='main', **kwargs): from_pt = kwargs.pop('from_pt', False) resume_download = kwargs.pop('resume_download', None) proxies = kwargs.pop('proxies', None) use_auth_token = kwargs.pop('use_auth_token', None) trust_remote_code = kwargs.pop('trust_remote_code', None) from_pipeline = kwargs.pop('_from_pipeline', None) from_auto_class = kwargs.pop('_from_auto', False) _do_init = kwargs.pop('_do_init', True) subfolder = kwargs.pop('subfolder', '') commit_hash = kwargs.pop('_commit_hash', None) _ = kwargs.pop('adapter_kwargs', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') token = use_auth_token if trust_remote_code is True: logger.warning('The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.') user_agent = {'file_type': 'model', 'framework': 'flax', 'from_auto_class': from_auto_class} if from_pipeline is not None: user_agent['using_pipeline'] = from_pipeline if is_offline_mode() and (not local_files_only): logger.info('Offline mode: forcing local_files_only=True') local_files_only = True if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path (config, model_kwargs) = cls.config_class.from_pretrained(config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, _commit_hash=commit_hash, **kwargs) else: model_kwargs = kwargs.copy() if commit_hash is None: commit_hash = getattr(config, '_commit_hash', None) model_kwargs['dtype'] = dtype is_sharded = False if pretrained_model_name_or_path is not None: pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): if os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME) is_sharded = True elif is_safetensors_available() and os.path.isfile(os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) elif from_pt and 
os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME) elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME) is_sharded = True elif is_safetensors_available() and os.path.isfile(os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) is_sharded = True raise NotImplementedError('Support for sharded checkpoints using safetensors is coming soon!') elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)): raise EnvironmentError(f'Error no file named {FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those weights.') else: raise EnvironmentError(f'Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory {pretrained_model_name_or_path}.') elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): archive_file = pretrained_model_name_or_path is_local = True elif is_remote_url(pretrained_model_name_or_path): filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: if from_pt: filename = WEIGHTS_NAME else: filename = FLAX_WEIGHTS_NAME try: cached_file_kwargs = {'cache_dir': cache_dir, 'force_download': force_download, 'proxies': proxies, 'resume_download': resume_download, 'local_files_only': local_files_only, 'token': token, 'user_agent': user_agent, 'revision': revision, 'subfolder': subfolder, '_raise_exceptions_for_gated_repo': False, '_raise_exceptions_for_missing_entries': False, '_commit_hash': commit_hash} resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) if resolved_archive_file is None and filename == FLAX_WEIGHTS_NAME: resolved_archive_file = cached_file(pretrained_model_name_or_path, FLAX_WEIGHTS_INDEX_NAME, **cached_file_kwargs) if resolved_archive_file is not None: is_sharded = True if resolved_archive_file is None and from_pt: resolved_archive_file = cached_file(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs) if resolved_archive_file is not None: is_sharded = True if resolved_archive_file is None: filename = SAFE_WEIGHTS_NAME resolved_archive_file = cached_file(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME, **cached_file_kwargs) if resolved_archive_file is None: has_file_kwargs = {'revision': revision, 'proxies': proxies, 'token': token, 'cache_dir': cache_dir, 'local_files_only': local_files_only} if has_file(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **has_file_kwargs): is_sharded = True raise NotImplementedError('Support for sharded checkpoints using safetensors is coming soon!') elif has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME} but there is a file for PyTorch weights. 
Use `from_pt=True` to load this model from those weights.') elif has_file(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **has_file_kwargs): raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_INDEX_NAME} but there is a sharded file for PyTorch weights. Use `from_pt=True` to load this model from those weights.') else: raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.') except EnvironmentError: raise except Exception: raise EnvironmentError(f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.") if is_local: logger.info(f'loading weights file {archive_file}') resolved_archive_file = archive_file filename = resolved_archive_file.split(os.path.sep)[-1] else: logger.info(f'loading weights file {filename} from cache at {resolved_archive_file}') else: resolved_archive_file = None if is_sharded: (resolved_archive_file, _) = get_checkpoint_shard_files(pretrained_model_name_or_path, resolved_archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=commit_hash) safetensors_from_pt = False if filename == SAFE_WEIGHTS_NAME: with safe_open(resolved_archive_file, framework='flax') as f: safetensors_metadata = f.metadata() if safetensors_metadata is None or safetensors_metadata.get('format') not in ['pt', 'tf', 'flax']: raise OSError(f'The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata. 
Make sure you save your model with the `save_pretrained` method.') safetensors_from_pt = safetensors_metadata.get('format') == 'pt' model = cls(config, *model_args, _do_init=_do_init, **model_kwargs) if from_pt or safetensors_from_pt: state = load_pytorch_checkpoint_in_flax_state_dict(model, resolved_archive_file, is_sharded) else: if is_sharded: state = cls.load_flax_sharded_weights(resolved_archive_file) else: state = cls.load_flax_weights(resolved_archive_file) if _do_init: state = jax.tree_util.tree_map(jnp.array, state) else: state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend='cpu')[0]), state) if 'batch_stats' in state: if cls.base_model_prefix not in dict(model.params_shape_tree['params']) and cls.base_model_prefix in state['params']: state['params'] = state['params'][cls.base_model_prefix] state['batch_stats'] = state['batch_stats'][cls.base_model_prefix] if cls.base_model_prefix in dict(model.params_shape_tree['params']) and cls.base_model_prefix not in state['params']: state = {'params': {cls.base_model_prefix: state['params']}, 'batch_stats': {cls.base_model_prefix: state['batch_stats']}} else: if cls.base_model_prefix not in dict(model.params_shape_tree) and cls.base_model_prefix in state: state = state[cls.base_model_prefix] if cls.base_model_prefix in dict(model.params_shape_tree) and cls.base_model_prefix not in state: state = {cls.base_model_prefix: state} state = flatten_dict(state) random_state = flatten_dict(unfreeze(model.params if _do_init else model.params_shape_tree)) missing_keys = model.required_params - set(state.keys()) unexpected_keys = set(state.keys()) - model.required_params for unexpected_key in unexpected_keys.copy(): if 'num_batches_tracked' in unexpected_key[-1]: unexpected_keys.remove(unexpected_key) if missing_keys and (not _do_init): logger.warning(f'The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. Make sure to call model.init_weights to initialize the missing weights.') cls._missing_keys = missing_keys mismatched_keys = [] for key in state.keys(): if key in random_state and state[key].shape != random_state[key].shape: if ignore_mismatched_sizes: mismatched_keys.append((key, state[key].shape, random_state[key].shape)) state[key] = random_state[key] else: raise ValueError(f'Trying to load the pretrained weight for {key} failed: checkpoint has shape {state[key].shape} which is incompatible with the model shape {random_state[key].shape}. Using `ignore_mismatched_sizes=True` if you really want to load this checkpoint inside this model.') if missing_keys and _do_init: for missing_key in missing_keys: state[missing_key] = random_state[missing_key] for unexpected_key in unexpected_keys: del state[unexpected_key] if len(unexpected_keys) > 0: logger.warning(f'Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).') else: logger.info(f'All model checkpoint weights were used when initializing {model.__class__.__name__}.\n') if len(missing_keys) > 0: logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') elif len(mismatched_keys) == 0: logger.info(f'All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use {model.__class__.__name__} for predictions without further training.') if len(mismatched_keys) > 0: mismatched_warning = '\n'.join([f'- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated' for (key, shape1, shape2) in mismatched_keys]) logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized because the shapes did not match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') param_dtypes = jax.tree_util.tree_map(lambda x: x.dtype, state) fp16_params = [k for k in param_dtypes if param_dtypes[k] == jnp.float16] bf16_params = [k for k in param_dtypes if param_dtypes[k] == jnp.bfloat16] if len(fp16_params) > 0: logger.warning(f'Some of the weights of {model.__class__.__name__} were initialized in float16 precision from the model checkpoint at {pretrained_model_name_or_path}:\n{fp16_params}\nYou should probably UPCAST the model weights to float32 if this was not intended. See [`~FlaxPreTrainedModel.to_fp32`] for further information on how to do this.') if len(bf16_params) > 0: logger.warning(f'Some of the weights of {model.__class__.__name__} were initialized in bfloat16 precision from the model checkpoint at {pretrained_model_name_or_path}:\n{bf16_params}\nYou should probably UPCAST the model weights to float32 if this was not intended. 
See [`~FlaxPreTrainedModel.to_fp32`] for further information on how to do this.') if model.can_generate(): try: model.generation_config = GenerationConfig.from_pretrained(pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs) except OSError: logger.info('Generation config file not found, using a generation config created from the model config.') pass if _do_init: model.params = unflatten_dict(state) return model else: return (model, unflatten_dict(state)) def save_pretrained(self, save_directory: Union[str, os.PathLike], params=None, push_to_hub=False, max_shard_size='10GB', token: Optional[Union[str, bool]]=None, safe_serialization: bool=False, **kwargs): use_auth_token = kwargs.pop('use_auth_token', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') token = use_auth_token if token is not None: kwargs['token'] = token if os.path.isfile(save_directory): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop('commit_message', None) repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) save_directory = os.path.abspath(save_directory) self.config.architectures = [self.__class__.__name__[4:]] if self._auto_class is not None: custom_object_save(self, save_directory, config=self.config) self.config.save_pretrained(save_directory) if self.can_generate(): self.generation_config.save_pretrained(save_directory) weights_name = SAFE_WEIGHTS_NAME if safe_serialization else FLAX_WEIGHTS_NAME output_model_file = os.path.join(save_directory, weights_name) (shards, index) = flax_shard_checkpoint(params if params is not None else self.params, max_shard_size) for filename in os.listdir(save_directory): full_filename = os.path.join(save_directory, filename) weights_no_suffix = weights_name.replace('.bin', '').replace('.safetensors', '') if filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and (filename not in shards.keys()): os.remove(full_filename) if index is None: if safe_serialization: params = params if params is not None else self.params flat_dict = flatten_dict(params, sep='.') safe_save_file(flat_dict, output_model_file, metadata={'format': 'flax'}) else: with open(output_model_file, 'wb') as f: params = params if params is not None else self.params model_bytes = to_bytes(params) f.write(model_bytes) else: save_index_file = os.path.join(save_directory, FLAX_WEIGHTS_INDEX_NAME) with open(save_index_file, 'w', encoding='utf-8') as f: content = json.dumps(index, indent=2, sort_keys=True) + '\n' f.write(content) logger.info(f'The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be split in {len(shards)} checkpoint shards. 
You can find where each parameters has been saved in the index located at {save_index_file}.') for (shard_file, shard) in shards.items(): with open(os.path.join(save_directory, shard_file), mode='wb') as f: params = unflatten_dict(shard, sep='/') shard_bytes = to_bytes(params) f.write(shard_bytes) logger.info(f'Model weights saved in {output_model_file}') if push_to_hub: self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token) @classmethod def register_for_auto_class(cls, auto_class='FlaxAutoModel'): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f'{auto_class} is not a valid auto class.') cls._auto_class = auto_class FlaxPreTrainedModel.push_to_hub = copy_func(FlaxPreTrainedModel.push_to_hub) if FlaxPreTrainedModel.push_to_hub.__doc__ is not None: FlaxPreTrainedModel.push_to_hub.__doc__ = FlaxPreTrainedModel.push_to_hub.__doc__.format(object='model', object_class='FlaxAutoModel', object_files='model checkpoint') def overwrite_call_docstring(model_class, docstring): model_class.__call__ = copy_func(model_class.__call__) model_class.__call__.__doc__ = None model_class.__call__ = add_start_docstrings_to_model_forward(docstring)(model_class.__call__) def append_call_sample_docstring(model_class, checkpoint, output_type, config_class, mask=None, revision=None, real_checkpoint=None): model_class.__call__ = copy_func(model_class.__call__) model_class.__call__ = add_code_sample_docstrings(checkpoint=checkpoint, output_type=output_type, config_class=config_class, model_cls=model_class.__name__, revision=revision, real_checkpoint=real_checkpoint)(model_class.__call__) def append_replace_return_docstrings(model_class, output_type, config_class): model_class.__call__ = copy_func(model_class.__call__) model_class.__call__ = replace_return_docstrings(output_type=output_type, config_class=config_class)(model_class.__call__) # File: transformers-main/src/transformers/modeling_gguf_pytorch_utils.py from typing import Optional import numpy as np from tqdm import tqdm from .integrations import GGUF_CONFIG_MAPPING, GGUF_TENSOR_MAPPING, GGUF_TOKENIZER_MAPPING, _gguf_parse_value from .utils import is_torch_available from .utils.import_utils import is_gguf_available from .utils.logging import get_logger if is_torch_available(): import torch logger = get_logger(__name__) GGUF_TO_TRANSFORMERS_MAPPING = {'ignore': {'GGUF': {'version': 'version', 'tensor_count': 'tensor_count', 'kv_count': 'kv_count'}, 'general': {'file_type': 'file_type', 'quantization_version': 'quantization_version'}}, 'config': GGUF_CONFIG_MAPPING, 'tensors': GGUF_TENSOR_MAPPING, 'tokenizer': {'tokenizer': GGUF_TOKENIZER_MAPPING['tokenizer']}, 'tokenizer_config': {'tokenizer': GGUF_TOKENIZER_MAPPING['tokenizer_config']}} GGUF_SUPPORTED_ARCHITECTURES = list(GGUF_TO_TRANSFORMERS_MAPPING['tensors'].keys()) def read_field(reader, field): value = reader.fields[field] return [_gguf_parse_value(value.parts[_data_index], value.types) for _data_index in value.data] def load_gguf_checkpoint(gguf_checkpoint_path, return_tensors=False): if is_gguf_available() and is_torch_available(): from gguf import GGUFReader, dequantize else: logger.error('Loading a GGUF checkpoint in PyTorch, requires both PyTorch and GGUF>=0.10.0 to be installed. 
Please see https://pytorch.org/ and https://github.com/ggerganov/llama.cpp/tree/master/gguf-py for installation instructions.') raise ImportError('Please install torch and gguf>=0.10.0 to load a GGUF checkpoint in PyTorch.') reader = GGUFReader(gguf_checkpoint_path) fields = reader.fields reader_keys = list(fields.keys()) parsed_parameters = {k: {} for k in GGUF_TO_TRANSFORMERS_MAPPING} architecture = read_field(reader, 'general.architecture')[0] model_name = read_field(reader, 'general.name') if 'llama' in architecture and 'mistral' in model_name: updated_architecture = 'mistral' else: updated_architecture = architecture if 'qwen2moe' in architecture: updated_architecture = 'qwen2_moe' if architecture not in GGUF_SUPPORTED_ARCHITECTURES: raise ValueError(f'Architecture {architecture} not supported') for (gguf_key, field) in reader.fields.items(): gguf_key = gguf_key.replace(architecture, updated_architecture) split = gguf_key.split('.') prefix = split[0] config_key = '.'.join(split[1:]) value = [_gguf_parse_value(field.parts[_data_index], field.types) for _data_index in field.data] if len(value) == 1: value = value[0] if isinstance(value, str) and architecture in value: value = value.replace(architecture, updated_architecture) for parameter in GGUF_TO_TRANSFORMERS_MAPPING: parameter_renames = GGUF_TO_TRANSFORMERS_MAPPING[parameter] if prefix in parameter_renames and config_key in parameter_renames[prefix]: renamed_config_key = parameter_renames[prefix][config_key] if renamed_config_key == -1: continue if renamed_config_key is not None: parsed_parameters[parameter][renamed_config_key] = value if gguf_key in reader_keys: reader_keys.remove(gguf_key) if gguf_key in reader_keys: logger.info(f'Some keys were not parsed and added into account {gguf_key} | {value}') if 'vocab_size' not in parsed_parameters['config']: tokenizer_parameters = parsed_parameters['tokenizer'] if 'tokens' in tokenizer_parameters: parsed_parameters['config']['vocab_size'] = len(tokenizer_parameters['tokens']) else: logger.warning("Can't find a way to retrieve missing config vocab_size from tokenizer parameters. This will use default value from model config class and cause unexpected behavior.") if return_tensors: tensor_key_mapping = GGUF_TO_TRANSFORMERS_MAPPING['tensors'][architecture] for tensor in tqdm(reader.tensors, desc='Converting and de-quantizing GGUF tensors...'): renamed_tensor_name = tensor.name for tensor_name_mapping in GGUF_TO_TRANSFORMERS_MAPPING['tensors']: if tensor_name_mapping in renamed_tensor_name: renamed_tensor_name = renamed_tensor_name.replace(tensor_name_mapping, GGUF_TO_TRANSFORMERS_MAPPING['tensors'][tensor_name_mapping]) name = tensor.name weights = dequantize(tensor.data, tensor.tensor_type) if architecture == 'llama' and ('.attn_k.' in name or '.attn_q.' in name): num_heads = parsed_parameters['config']['num_attention_heads'] num_kv_heads = parsed_parameters['config']['num_key_value_heads'] if '.attn_q.' in name: weights = reverse_permute_weights(weights, num_heads, num_heads) elif '.attn_k.' 
in name: weights = reverse_permute_weights(weights, num_heads, num_kv_heads) for tensor_name in tensor_key_mapping: if tensor_name in name: name = name.replace(tensor_name, tensor_key_mapping[tensor_name]) parsed_parameters['tensors'][name] = torch.from_numpy(np.copy(weights)) if len(reader_keys) > 0: logger.info(f'Some keys of the GGUF file were not considered: {reader_keys}') return parsed_parameters def reverse_permute_weights(weights: np.ndarray, n_head: int, num_kv_heads: Optional[int]=None) -> np.ndarray: if num_kv_heads is not None and n_head != num_kv_heads: n_head = num_kv_heads dim = weights.shape[0] // n_head // 2 w = weights.reshape(n_head, dim, 2, *weights.shape[1:]) return w.swapaxes(2, 1).reshape(weights.shape) # File: transformers-main/src/transformers/modeling_outputs.py import warnings from dataclasses import dataclass from typing import Optional, Tuple import torch from .utils import ModelOutput @dataclass class BaseModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseModelOutputWithNoAttention(ModelOutput): last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseModelOutputWithPooling(ModelOutput): last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseModelOutputWithPoolingAndNoAttention(ModelOutput): last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseModelOutputWithPast(ModelOutput): last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseModelOutputWithCrossAttentions(ModelOutput): last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseModelOutputWithPastAndCrossAttentions(ModelOutput): last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class MoECausalLMOutputWithPast(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None z_loss: torch.FloatTensor = None aux_loss: torch.FloatTensor = None router_logits: 
Optional[Tuple[torch.FloatTensor]] = None @dataclass class MoEModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None router_probs: Optional[Tuple[torch.FloatTensor]] = None @dataclass class MoeModelOutputWithPast(ModelOutput): last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None router_logits: Optional[Tuple[torch.FloatTensor]] = None @dataclass class MoeCausalLMOutputWithPast(ModelOutput): loss: Optional[torch.FloatTensor] = None aux_loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None router_logits: Optional[Tuple[torch.FloatTensor]] = None @dataclass class MoEModelOutputWithPastAndCrossAttentions(ModelOutput): last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None router_probs: Optional[Tuple[torch.FloatTensor]] = None @dataclass class Seq2SeqModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqMoEModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None @dataclass class CausalLMOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class CausalLMOutputWithPast(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class CausalLMOutputWithCrossAttentions(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: 
Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class SequenceClassifierOutputWithPast(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class MaskedLMOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqLMOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqMoEOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None encoder_z_loss: torch.FloatTensor = None decoder_z_loss: torch.FloatTensor = None encoder_aux_loss: torch.FloatTensor = None decoder_aux_loss: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None @dataclass class NextSentencePredictorOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class SequenceClassifierOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqSequenceClassifierOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class MultipleChoiceModelOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: 
Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class TokenClassifierOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class QuestionAnsweringModelOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqQuestionAnsweringModelOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class SemanticSegmenterOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class ImageClassifierOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class ImageClassifierOutputWithNoAttention(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class DepthEstimatorOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None predicted_depth: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class ImageSuperResolutionOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None reconstruction: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Wav2Vec2BaseModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None extract_features: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class XVectorOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None embeddings: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BackboneOutput(ModelOutput): feature_maps: Tuple[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseModelOutputWithPoolingAndProjection(ModelOutput): last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None projection_state: Optional[Tuple[torch.FloatTensor]] = None 
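# --- Illustrative usage sketch (added comments, not part of the original file) ---
# Every dataclass in this module derives from `ModelOutput`, which behaves both like a
# tuple and like a dict: fields can be read by attribute, by key, or by index, and fields
# left as `None` are dropped when the output is iterated or converted with `to_tuple()`.
# A minimal, hedged example (assuming `torch` is available, as imported at the top of this file):
#
#     >>> outputs = BaseModelOutput(last_hidden_state=torch.ones(1, 3, 4))
#     >>> outputs.last_hidden_state.shape      # attribute access
#     torch.Size([1, 3, 4])
#     >>> outputs["last_hidden_state"].shape   # dict-style access
#     torch.Size([1, 3, 4])
#     >>> len(outputs.to_tuple())              # None-valued fields are skipped
#     1
#
# The same access patterns apply to the task-specific output classes defined below.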
@dataclass class Seq2SeqSpectrogramOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None spectrogram: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Seq2SeqTSModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None static_features: Optional[torch.FloatTensor] = None @dataclass class Seq2SeqTSPredictionOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None params: Optional[Tuple[torch.FloatTensor]] = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None static_features: Optional[torch.FloatTensor] = None @dataclass class SampleTSPredictionOutput(ModelOutput): sequences: torch.FloatTensor = None @dataclass class MaskedImageModelingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None reconstruction: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @property def logits(self): warnings.warn('logits attribute is deprecated and will be removed in version 5 of Transformers. 
Please use the reconstruction attribute to retrieve the final output instead.', FutureWarning) return self.reconstruction # File: transformers-main/src/transformers/modeling_rope_utils.py import math from typing import Optional, Tuple from .configuration_utils import PretrainedConfig from .utils import is_torch_available, logging logger = logging.get_logger(__name__) if is_torch_available(): import torch def _compute_default_rope_parameters(config: Optional[PretrainedConfig]=None, device: Optional['torch.device']=None, seq_len: Optional[int]=None, **rope_kwargs) -> Tuple['torch.Tensor', float]: if config is not None and len(rope_kwargs) > 0: raise ValueError(f'Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in `_compute_default_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}') if len(rope_kwargs) > 0: base = rope_kwargs['base'] dim = rope_kwargs['dim'] elif config is not None: base = config.rope_theta partial_rotary_factor = config.partial_rotary_factor if hasattr(config, 'partial_rotary_factor') else 1.0 head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads) dim = int(head_dim * partial_rotary_factor) attention_factor = 1.0 inv_freq = 1.0 / base ** (torch.arange(0, dim, 2, dtype=torch.int64).float().to(device) / dim) return (inv_freq, attention_factor) def _compute_linear_scaling_rope_parameters(config: Optional[PretrainedConfig]=None, device: Optional['torch.device']=None, seq_len: Optional[int]=None, **rope_kwargs) -> Tuple['torch.Tensor', float]: if config is not None and len(rope_kwargs) > 0: raise ValueError(f'Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in `_compute_linear_scaling_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}') if len(rope_kwargs) > 0: factor = rope_kwargs['factor'] elif config is not None: factor = config.rope_scaling['factor'] (inv_freq, attention_factor) = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs) inv_freq /= factor return (inv_freq, attention_factor) def _compute_dynamic_ntk_parameters(config: Optional[PretrainedConfig]=None, device: Optional['torch.device']=None, seq_len: Optional[int]=None, **rope_kwargs) -> Tuple['torch.Tensor', float]: if config is not None and len(rope_kwargs) > 0: raise ValueError(f'Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in `_compute_dynamic_ntk_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}') if len(rope_kwargs) > 0: base = rope_kwargs['base'] dim = rope_kwargs['dim'] max_position_embeddings = rope_kwargs['max_position_embeddings'] factor = rope_kwargs['factor'] elif config is not None: base = config.rope_theta partial_rotary_factor = config.partial_rotary_factor if hasattr(config, 'partial_rotary_factor') else 1.0 head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads) dim = int(head_dim * partial_rotary_factor) max_position_embeddings = config.max_position_embeddings factor = config.rope_scaling['factor'] attention_factor = 1.0 seq_len = seq_len if seq_len is not None and seq_len > max_position_embeddings else max_position_embeddings base = base * (factor * seq_len / max_position_embeddings - (factor - 1)) ** (dim / (dim - 2)) inv_freq = 1.0 / base ** (torch.arange(0, dim, 2, dtype=torch.int64).float().to(device) / dim) return (inv_freq, attention_factor) def _compute_yarn_parameters(config: PretrainedConfig, device: 'torch.device', seq_len: Optional[int]=None, **rope_kwargs) -> 
Tuple['torch.Tensor', float]: if len(rope_kwargs) > 0: raise ValueError(f'Unexpected arguments: `**rope_kwargs` should be unset in `_compute_yarn_parameters`, got {rope_kwargs}') base = config.rope_theta partial_rotary_factor = config.partial_rotary_factor if hasattr(config, 'partial_rotary_factor') else 1.0 head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads) dim = int(head_dim * partial_rotary_factor) max_position_embeddings = config.max_position_embeddings factor = config.rope_scaling['factor'] attention_factor = config.rope_scaling.get('attention_factor') if attention_factor is None: attention_factor = 0.1 * math.log(factor) + 1.0 beta_fast = config.rope_scaling.get('beta_fast') or 32 beta_slow = config.rope_scaling.get('beta_slow') or 1 def find_correction_dim(num_rotations, dim, base, max_position_embeddings): return dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi)) / (2 * math.log(base)) def find_correction_range(low_rot, high_rot, dim, base, max_position_embeddings): low = math.floor(find_correction_dim(low_rot, dim, base, max_position_embeddings)) high = math.ceil(find_correction_dim(high_rot, dim, base, max_position_embeddings)) return (max(low, 0), min(high, dim - 1)) def linear_ramp_factor(min, max, dim): if min == max: max += 0.001 linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min) ramp_func = torch.clamp(linear_func, 0, 1) return ramp_func pos_freqs = base ** (torch.arange(0, dim, 2).float().to(device) / dim) inv_freq_extrapolation = 1.0 / pos_freqs inv_freq_interpolation = 1.0 / (factor * pos_freqs) (low, high) = find_correction_range(beta_fast, beta_slow, dim, base, max_position_embeddings) inv_freq_extrapolation_factor = 1 - linear_ramp_factor(low, high, dim // 2).float().to(device) inv_freq = inv_freq_interpolation * (1 - inv_freq_extrapolation_factor) + inv_freq_extrapolation * inv_freq_extrapolation_factor return (inv_freq, attention_factor) def _compute_longrope_parameters(config: PretrainedConfig, device: 'torch.device', seq_len: Optional[int]=None, **rope_kwargs) -> Tuple['torch.Tensor', float]: if len(rope_kwargs) > 0: raise ValueError(f'Unexpected arguments: `**rope_kwargs` should be unset in `_compute_longrope_parameters`, got {rope_kwargs}') base = config.rope_theta partial_rotary_factor = config.partial_rotary_factor if hasattr(config, 'partial_rotary_factor') else 1.0 head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads) dim = int(head_dim * partial_rotary_factor) long_factor = config.rope_scaling['long_factor'] short_factor = config.rope_scaling['short_factor'] factor = config.rope_scaling.get('factor') attention_factor = config.rope_scaling.get('attention_factor') if hasattr(config, 'original_max_position_embeddings'): max_position_embeddings = config.original_max_position_embeddings expanded_max_position_embeddings = config.max_position_embeddings factor = expanded_max_position_embeddings / max_position_embeddings else: max_position_embeddings = config.max_position_embeddings expanded_max_position_embeddings = max_position_embeddings * factor if attention_factor is None: if factor <= 1.0: attention_factor = 1.0 else: attention_factor = math.sqrt(1 + math.log(factor) / math.log(max_position_embeddings)) if expanded_max_position_embeddings > max_position_embeddings: ext_factors = torch.tensor(long_factor, dtype=torch.float32, device=device) else: ext_factors = torch.tensor(short_factor, dtype=torch.float32, device=device) inv_freq_shape = 
torch.arange(0, dim, 2, dtype=torch.int64, device=device).float() / dim inv_freq = 1.0 / (ext_factors * base ** inv_freq_shape) return (inv_freq, attention_factor) def _compute_llama3_parameters(config: PretrainedConfig, device: 'torch.device', seq_len: Optional[int]=None, **rope_kwargs) -> Tuple['torch.Tensor', float]: (inv_freq, attention_factor) = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs) factor = config.rope_scaling['factor'] low_freq_factor = config.rope_scaling['low_freq_factor'] high_freq_factor = config.rope_scaling['high_freq_factor'] old_context_len = config.rope_scaling['original_max_position_embeddings'] low_freq_wavelen = old_context_len / low_freq_factor high_freq_wavelen = old_context_len / high_freq_factor wavelen = 2 * math.pi / inv_freq inv_freq_llama = torch.where(wavelen > low_freq_wavelen, inv_freq / factor, inv_freq) smooth_factor = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor) smoothed_inv_freq = (1 - smooth_factor) * inv_freq_llama / factor + smooth_factor * inv_freq_llama is_medium_freq = ~(wavelen < high_freq_wavelen) * ~(wavelen > low_freq_wavelen) inv_freq_llama = torch.where(is_medium_freq, smoothed_inv_freq, inv_freq_llama) return (inv_freq_llama, attention_factor) ROPE_INIT_FUNCTIONS = {'default': _compute_default_rope_parameters, 'linear': _compute_linear_scaling_rope_parameters, 'dynamic': _compute_dynamic_ntk_parameters, 'yarn': _compute_yarn_parameters, 'longrope': _compute_longrope_parameters, 'llama3': _compute_llama3_parameters} def _check_received_keys(rope_type: str, received_keys: set, required_keys: set, optional_keys: Optional[set]=None): if 'type' in received_keys: received_keys -= {'type'} required_keys.add('rope_type') missing_keys = required_keys - received_keys if missing_keys: raise KeyError(f"Missing required keys in `rope_scaling` for 'rope_type'='{rope_type}': {missing_keys}") if optional_keys is not None: unused_keys = received_keys - required_keys - optional_keys else: unused_keys = received_keys - required_keys if unused_keys: logger.warning(f"Unrecognized keys in `rope_scaling` for 'rope_type'='{rope_type}': {unused_keys}") def _validate_default_rope_parameters(config: PretrainedConfig): rope_scaling = config.rope_scaling rope_type = rope_scaling.get('rope_type', rope_scaling.get('type', None)) required_keys = {'rope_type'} received_keys = set(rope_scaling.keys()) _check_received_keys(rope_type, received_keys, required_keys) def _validate_linear_scaling_rope_parameters(config: PretrainedConfig): rope_scaling = config.rope_scaling rope_type = rope_scaling.get('rope_type', rope_scaling.get('type', None)) required_keys = {'rope_type', 'factor'} received_keys = set(rope_scaling.keys()) _check_received_keys(rope_type, received_keys, required_keys) factor = rope_scaling['factor'] if factor is None or not isinstance(factor, float) or factor < 1.0: logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}") def _validate_dynamic_scaling_rope_parameters(config: PretrainedConfig): rope_scaling = config.rope_scaling rope_type = rope_scaling.get('rope_type', rope_scaling.get('type', None)) required_keys = {'rope_type', 'factor'} optional_keys = {'original_max_position_embeddings'} received_keys = set(rope_scaling.keys()) _check_received_keys(rope_type, received_keys, required_keys, optional_keys) factor = rope_scaling['factor'] if factor is None or not isinstance(factor, float) or factor < 1.0: logger.warning(f"`rope_scaling`'s factor field must be a 
float >= 1, got {factor}") def _validate_yarn_parameters(config: PretrainedConfig): rope_scaling = config.rope_scaling rope_type = rope_scaling.get('rope_type', rope_scaling.get('type', None)) required_keys = {'rope_type', 'factor'} optional_keys = {'attention_factor', 'beta_fast', 'beta_slow'} received_keys = set(rope_scaling.keys()) _check_received_keys(rope_type, received_keys, required_keys, optional_keys) factor = rope_scaling['factor'] if factor is None or not isinstance(factor, float) or factor < 1.0: logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}") attention_factor = rope_scaling.get('attention_factor') if attention_factor is not None and (not isinstance(attention_factor, float) or attention_factor < 0): logger.warning(f"`rope_scaling`'s attention_factor field must be a float greater than 0, got {attention_factor}") beta_fast = rope_scaling.get('beta_fast') if beta_fast is not None and (not isinstance(beta_fast, float)): logger.warning(f"`rope_scaling`'s beta_fast field must be a float, got {beta_fast}") beta_slow = rope_scaling.get('beta_slow') if beta_slow is not None and (not isinstance(beta_slow, float)): logger.warning(f"`rope_scaling`'s beta_slow field must be a float, got {beta_slow}") if (beta_fast or 32) < (beta_slow or 1): logger.warning(f"`rope_scaling`'s beta_fast field must be greater than beta_slow, got beta_fast={beta_fast} (defaults to 32 if None) and beta_slow={beta_slow} (defaults to 1 if None)") def _validate_longrope_parameters(config: PretrainedConfig): rope_scaling = config.rope_scaling rope_type = rope_scaling.get('rope_type', rope_scaling.get('type', None)) required_keys = {'rope_type', 'short_factor', 'long_factor'} optional_keys = {'attention_factor', 'factor', 'original_max_position_embeddings'} received_keys = set(rope_scaling.keys()) _check_received_keys(rope_type, received_keys, required_keys, optional_keys) partial_rotary_factor = config.partial_rotary_factor if hasattr(config, 'partial_rotary_factor') else 1.0 head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads) dim = int(head_dim * partial_rotary_factor) short_factor = rope_scaling.get('short_factor') if not isinstance(short_factor, list) and all((isinstance(x, (int, float)) for x in short_factor)): logger.warning(f"`rope_scaling`'s short_factor field must be a list of numbers, got {short_factor}") if not len(short_factor) == dim // 2: logger.warning(f"`rope_scaling`'s short_factor field must have length {dim // 2}, got {len(short_factor)}") long_factor = rope_scaling.get('long_factor') if not isinstance(long_factor, list) and all((isinstance(x, (int, float)) for x in long_factor)): logger.warning(f"`rope_scaling`'s long_factor field must be a list of numbers, got {long_factor}") if not len(long_factor) == dim // 2: logger.warning(f"`rope_scaling`'s long_factor field must have length {dim // 2}, got {len(long_factor)}") if hasattr(config, 'original_max_position_embeddings'): logger.warning_once('This model has set a `original_max_position_embeddings` field, to be used together with `max_position_embeddings` to determine a scaling factor. 
Please set the `factor` field of `rope_scaling`with this ratio instead -- we recommend the use of this field over `original_max_position_embeddings`, as it is compatible with most model architectures.') else: factor = rope_scaling.get('factor') if factor is None: logger.warning("Missing required keys in `rope_scaling`: 'factor'") elif not isinstance(factor, float) or factor < 1.0: logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}") attention_factor = rope_scaling.get('attention_factor') if attention_factor is not None: if not isinstance(attention_factor, float) or attention_factor < 0.0: logger.warning(f"`rope_scaling`'s attention_factor field must be a float greater than 0, got {attention_factor}") def _validate_llama3_parameters(config: PretrainedConfig): rope_scaling = config.rope_scaling rope_type = rope_scaling.get('rope_type', rope_scaling.get('type', None)) required_keys = {'rope_type', 'factor', 'original_max_position_embeddings', 'low_freq_factor', 'high_freq_factor'} received_keys = set(rope_scaling.keys()) _check_received_keys(rope_type, received_keys, required_keys) factor = rope_scaling['factor'] if factor is None or not isinstance(factor, float) or factor < 1.0: logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}") low_freq_factor = rope_scaling['low_freq_factor'] high_freq_factor = rope_scaling['high_freq_factor'] if low_freq_factor is None or not isinstance(low_freq_factor, float): logger.warning(f"`rope_scaling`'s low_freq_factor field must be a float, got {low_freq_factor}") if high_freq_factor is None or not isinstance(high_freq_factor, float): logger.warning(f"`rope_scaling`'s high_freq_factor field must be a float, got {high_freq_factor}") if high_freq_factor <= low_freq_factor: logger.warning(f"`rope_scaling`'s high_freq_factor field must be greater than low_freq_factor, got high_freq_factor={high_freq_factor} and low_freq_factor={low_freq_factor}") original_max_position_embeddings = rope_scaling['original_max_position_embeddings'] if original_max_position_embeddings is None or not isinstance(original_max_position_embeddings, int): logger.warning(f"`rope_scaling`'s original_max_position_embeddings field must be an integer, got {original_max_position_embeddings}") if original_max_position_embeddings >= config.max_position_embeddings: logger.warning(f"`rope_scaling`'s original_max_position_embeddings field must be less than max_position_embeddings, got {original_max_position_embeddings} and max_position_embeddings={config.max_position_embeddings}") ROPE_VALIDATION_FUNCTIONS = {'default': _validate_default_rope_parameters, 'linear': _validate_linear_scaling_rope_parameters, 'dynamic': _validate_dynamic_scaling_rope_parameters, 'yarn': _validate_yarn_parameters, 'longrope': _validate_longrope_parameters, 'llama3': _validate_llama3_parameters} def rope_config_validation(config: PretrainedConfig): rope_scaling = getattr(config, 'rope_scaling', None) if rope_scaling is None: return rope_type = rope_scaling.get('rope_type', rope_scaling.get('type', 'default')) validation_fn = ROPE_VALIDATION_FUNCTIONS.get(rope_type) if validation_fn is not None: validation_fn(config) else: logger.warning(f"Missing validation function mapping in `ROPE_VALIDATION_FUNCTIONS` for 'rope_type'='{rope_type}'") # File: transformers-main/src/transformers/modeling_tf_outputs.py from __future__ import annotations import warnings from dataclasses import dataclass from typing import List, Optional, Tuple import tensorflow as tf from .utils 
import ModelOutput @dataclass class TFBaseModelOutput(ModelOutput): last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFBaseModelOutputWithNoAttention(ModelOutput): last_hidden_state: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor, ...]] = None @dataclass class TFBaseModelOutputWithPooling(ModelOutput): last_hidden_state: tf.Tensor = None pooler_output: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFBaseModelOutputWithPoolingAndNoAttention(ModelOutput): last_hidden_state: tf.Tensor = None pooler_output: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor, ...]] = None @dataclass class TFBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput): last_hidden_state: tf.Tensor = None pooler_output: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None @dataclass class TFBaseModelOutputWithPast(ModelOutput): last_hidden_state: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFBaseModelOutputWithCrossAttentions(ModelOutput): last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None @dataclass class TFBaseModelOutputWithPastAndCrossAttentions(ModelOutput): last_hidden_state: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None @dataclass class TFSeq2SeqModelOutput(ModelOutput): last_hidden_state: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None decoder_hidden_states: Tuple[tf.Tensor] | None = None decoder_attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None encoder_last_hidden_state: tf.Tensor | None = None encoder_hidden_states: Tuple[tf.Tensor] | None = None encoder_attentions: Tuple[tf.Tensor] | None = None @dataclass class TFCausalLMOutput(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFCausalLMOutputWithPast(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFCausalLMOutputWithCrossAttentions(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None @dataclass class TFMaskedLMOutput(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFSeq2SeqLMOutput(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None decoder_hidden_states: Tuple[tf.Tensor] | None = None decoder_attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None 
encoder_last_hidden_state: tf.Tensor | None = None encoder_hidden_states: Tuple[tf.Tensor] | None = None encoder_attentions: Tuple[tf.Tensor] | None = None @dataclass class TFNextSentencePredictorOutput(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFSequenceClassifierOutput(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFSeq2SeqSequenceClassifierOutput(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None decoder_hidden_states: Tuple[tf.Tensor] | None = None decoder_attentions: Tuple[tf.Tensor] | None = None cross_attentions: Tuple[tf.Tensor] | None = None encoder_last_hidden_state: tf.Tensor | None = None encoder_hidden_states: Tuple[tf.Tensor] | None = None encoder_attentions: Tuple[tf.Tensor] | None = None @dataclass class TFSemanticSegmenterOutput(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFSemanticSegmenterOutputWithNoAttention(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None @dataclass class TFImageClassifierOutput(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFMultipleChoiceModelOutput(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFTokenClassifierOutput(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFQuestionAnsweringModelOutput(ModelOutput): loss: tf.Tensor | None = None start_logits: tf.Tensor = None end_logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFSeq2SeqQuestionAnsweringModelOutput(ModelOutput): loss: tf.Tensor | None = None start_logits: tf.Tensor = None end_logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None decoder_hidden_states: Tuple[tf.Tensor] | None = None decoder_attentions: Tuple[tf.Tensor] | None = None encoder_last_hidden_state: tf.Tensor | None = None encoder_hidden_states: Tuple[tf.Tensor] | None = None encoder_attentions: Tuple[tf.Tensor] | None = None @dataclass class TFSequenceClassifierOutputWithPast(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFImageClassifierOutputWithNoAttention(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor, ...]] = None @dataclass class TFMaskedImageModelingOutput(ModelOutput): loss: tf.Tensor | None = None reconstruction: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @property def logits(self): warnings.warn('logits attribute is deprecated and will be removed in version 5 of Transformers. 
Please use the reconstruction attribute to retrieve the final output instead.', FutureWarning) return self.reconstruction # File: transformers-main/src/transformers/modeling_tf_pytorch_utils.py """""" import os import re import numpy from .utils import ExplicitEnum, expand_dims, is_numpy_array, is_safetensors_available, is_torch_tensor, logging, reshape, squeeze, tensor_size from .utils import transpose as transpose_func if is_safetensors_available(): from safetensors import safe_open logger = logging.get_logger(__name__) class TransposeType(ExplicitEnum): NO = 'no' SIMPLE = 'simple' CONV1D = 'conv1d' CONV2D = 'conv2d' def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove='', tf_weight_shape=None, name_scope=None): if name_scope is not None: if not tf_name.startswith(name_scope) and 'final_logits_bias' not in tf_name: raise ValueError(f'Weight name {tf_name} does not start with name_scope {name_scope}. This is an internal error in Transformers, so (unless you were doing something really evil) please open an issue to report it!') tf_name = tf_name[len(name_scope):] tf_name = tf_name.lstrip('/') tf_name = tf_name.replace(':0', '') tf_name = re.sub('/[^/]*___([^/]*)/', '/\\1/', tf_name) tf_name = tf_name.replace('_._', '/') tf_name = re.sub('//+', '/', tf_name) tf_name = tf_name.split('/') if len(tf_name) > 1: tf_name = tf_name[1:] tf_weight_shape = list(tf_weight_shape) if tf_name[-1] == 'kernel' and tf_weight_shape is not None and (len(tf_weight_shape) == 4): transpose = TransposeType.CONV2D elif tf_name[-1] == 'kernel' and tf_weight_shape is not None and (len(tf_weight_shape) == 3): transpose = TransposeType.CONV1D elif bool(tf_name[-1] in ['kernel', 'pointwise_kernel', 'depthwise_kernel'] or 'emb_projs' in tf_name or 'out_projs' in tf_name): transpose = TransposeType.SIMPLE else: transpose = TransposeType.NO if tf_name[-1] == 'kernel' or tf_name[-1] == 'embeddings' or tf_name[-1] == 'gamma': tf_name[-1] = 'weight' if tf_name[-1] == 'beta': tf_name[-1] = 'bias' if tf_name[-1] == 'pointwise_kernel' or tf_name[-1] == 'depthwise_kernel': tf_name[-1] = tf_name[-1].replace('_kernel', '.weight') tf_name = '.'.join(tf_name) if start_prefix_to_remove: tf_name = tf_name.replace(start_prefix_to_remove, '', 1) return (tf_name, transpose) def apply_transpose(transpose: TransposeType, weight, match_shape=None, pt_to_tf=True): if transpose is TransposeType.CONV2D: axes = (2, 3, 1, 0) if pt_to_tf else (3, 2, 0, 1) weight = transpose_func(weight, axes=axes) elif transpose is TransposeType.CONV1D: weight = transpose_func(weight, axes=(2, 1, 0)) elif transpose is TransposeType.SIMPLE: weight = transpose_func(weight) if match_shape is None: return weight if len(match_shape) < len(weight.shape): weight = squeeze(weight) elif len(match_shape) > len(weight.shape): weight = expand_dims(weight, axis=0) if list(match_shape) != list(weight.shape): try: weight = reshape(weight, match_shape) except AssertionError as e: e.args += (match_shape, match_shape) raise e return weight def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False, output_loading_info=False, _prefix=None, tf_to_pt_weight_rename=None): try: import tensorflow as tf import torch from safetensors.torch import load_file as safe_load_file from .pytorch_utils import is_torch_greater_or_equal_than_1_13 except ImportError: logger.error('Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. 
Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.') raise if isinstance(pytorch_checkpoint_path, str): pytorch_checkpoint_path = [pytorch_checkpoint_path] pt_state_dict = {} for path in pytorch_checkpoint_path: pt_path = os.path.abspath(path) logger.info(f'Loading PyTorch weights from {pt_path}') if pt_path.endswith('.safetensors'): state_dict = safe_load_file(pt_path) else: weights_only_kwarg = {'weights_only': True} if is_torch_greater_or_equal_than_1_13 else {} state_dict = torch.load(pt_path, map_location='cpu', **weights_only_kwarg) pt_state_dict.update(state_dict) logger.info(f'PyTorch checkpoint contains {sum((t.numel() for t in pt_state_dict.values())):,} parameters') return load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info, _prefix=_prefix, tf_to_pt_weight_rename=tf_to_pt_weight_rename) def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False): pt_state_dict = pt_model.state_dict() return load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys) def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False, output_loading_info=False, _prefix=None, tf_to_pt_weight_rename=None): try: import tensorflow as tf import torch except ImportError: logger.error('Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.') raise pt_state_dict = {k: v.numpy() if v.dtype != torch.bfloat16 else v.float().numpy() for (k, v) in pt_state_dict.items()} return load_pytorch_state_dict_in_tf2_model(tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info, _prefix=_prefix, tf_to_pt_weight_rename=tf_to_pt_weight_rename) def _log_key_warnings(missing_keys, unexpected_keys, mismatched_keys, class_name): if len(unexpected_keys) > 0: logger.warning(f'Some weights of the PyTorch model were not used when initializing the TF 2.0 model {class_name}: {unexpected_keys}\n- This IS expected if you are initializing {class_name} from a PyTorch model trained on another task or with another architecture (e.g. initializing a TFBertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing {class_name} from a PyTorch model that you expect to be exactly identical (e.g. 
initializing a TFBertForSequenceClassification model from a BertForSequenceClassification model).') else: logger.warning(f'All PyTorch model weights were used when initializing {class_name}.\n') if len(missing_keys) > 0: logger.warning(f'Some weights or buffers of the TF 2.0 model {class_name} were not initialized from the PyTorch model and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') else: logger.warning(f'All the weights of {class_name} were initialized from the PyTorch model.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use {class_name} for predictions without further training.') if len(mismatched_keys) > 0: mismatched_warning = '\n'.join([f'- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated' for (key, shape1, shape2) in mismatched_keys]) logger.warning(f'Some weights of {class_name} were not initialized from the model checkpoint are newly initialized because the shapes did not match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') def load_pytorch_state_dict_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False, output_loading_info=False, _prefix=None, tf_to_pt_weight_rename=None, ignore_mismatched_sizes=False, skip_logger_warnings=False): import tensorflow as tf if tf_inputs is None: tf_inputs = tf_model.dummy_inputs if _prefix is None: _prefix = '' if tf_inputs: with tf.name_scope(_prefix): tf_model(tf_inputs, training=False) tf_keys_to_pt_keys = {} for key in pt_state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if 'running_var' in key: new_key = key.replace('running_var', 'moving_variance') if 'running_mean' in key: new_key = key.replace('running_mean', 'moving_mean') key_components = key.split('.') name = None if key_components[-3::2] == ['parametrizations', 'original0']: name = key_components[-2] + '_g' elif key_components[-3::2] == ['parametrizations', 'original1']: name = key_components[-2] + '_v' if name is not None: key_components = key_components[:-3] + [name] new_key = '.'.join(key_components) if new_key is None: new_key = key tf_keys_to_pt_keys[new_key] = key start_prefix_to_remove = '' if not any((s.startswith(tf_model.base_model_prefix) for s in tf_keys_to_pt_keys.keys())): start_prefix_to_remove = tf_model.base_model_prefix + '.' 
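# Descriptive note (added comments, not in the original source): the loop below walks every
# Keras weight of `tf_model` (trainable and non-trainable), converts its TF name to a
# PyTorch-style key via `convert_tf_weight_name_to_pt_weight_name` (which also decides whether
# a transpose is needed, e.g. for `kernel` weights), optionally applies `tf_to_pt_weight_rename`
# aliases, fetches the matching tensor from `pt_state_dict` (or from a safetensors archive via
# `get_tensor`), reshapes/transposes it with `apply_transpose`, and assigns it to the Keras
# weight. Keys that cannot be matched are collected into `missing_keys`, `unexpected_keys` and
# `mismatched_keys` for the warnings emitted by `_log_key_warnings`.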
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights tf_loaded_numel = 0 all_pytorch_weights = set(tf_keys_to_pt_keys.keys()) missing_keys = [] mismatched_keys = [] is_safetensor_archive = hasattr(pt_state_dict, 'get_tensor') for symbolic_weight in symbolic_weights: sw_name = symbolic_weight.name (name, transpose) = convert_tf_weight_name_to_pt_weight_name(sw_name, start_prefix_to_remove=start_prefix_to_remove, tf_weight_shape=symbolic_weight.shape, name_scope=_prefix) if tf_to_pt_weight_rename is not None: aliases = tf_to_pt_weight_rename(name) for alias in aliases: if alias in tf_keys_to_pt_keys: name = alias break else: name = aliases[0] if name not in tf_keys_to_pt_keys: if allow_missing_keys: missing_keys.append(name) continue elif tf_model._keys_to_ignore_on_load_missing is not None: if any((re.search(pat, name) is not None for pat in tf_model._keys_to_ignore_on_load_missing)): continue raise AttributeError(f'{name} not found in PyTorch model') state_dict_name = tf_keys_to_pt_keys[name] if is_safetensor_archive: array = pt_state_dict.get_tensor(state_dict_name) else: array = pt_state_dict[state_dict_name] try: array = apply_transpose(transpose, array, symbolic_weight.shape) except tf.errors.InvalidArgumentError as e: if not ignore_mismatched_sizes: error_msg = str(e) error_msg += '\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.' raise tf.errors.InvalidArgumentError(error_msg) else: mismatched_keys.append((name, array.shape, symbolic_weight.shape)) continue tf_loaded_numel += tensor_size(array) symbolic_weight.assign(tf.cast(array, symbolic_weight.dtype)) del array all_pytorch_weights.discard(name) logger.info(f'Loaded {tf_loaded_numel:,} parameters in the TF 2.0 model.') unexpected_keys = list(all_pytorch_weights) if tf_model._keys_to_ignore_on_load_missing is not None: for pat in tf_model._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if tf_model._keys_to_ignore_on_load_unexpected is not None: for pat in tf_model._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if not skip_logger_warnings: _log_key_warnings(missing_keys, unexpected_keys, mismatched_keys, class_name=tf_model.__class__.__name__) if output_loading_info: loading_info = {'missing_keys': missing_keys, 'unexpected_keys': unexpected_keys, 'mismatched_keys': mismatched_keys} return (tf_model, loading_info) return tf_model def load_sharded_pytorch_safetensors_in_tf2_model(tf_model, safetensors_shards, tf_inputs=None, allow_missing_keys=False, output_loading_info=False, _prefix=None, tf_to_pt_weight_rename=None, ignore_mismatched_sizes=False): all_loading_infos = [] for shard in safetensors_shards: with safe_open(shard, framework='tf') as safetensors_archive: (tf_model, loading_info) = load_pytorch_state_dict_in_tf2_model(tf_model, safetensors_archive, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys, output_loading_info=True, _prefix=_prefix, tf_to_pt_weight_rename=tf_to_pt_weight_rename, ignore_mismatched_sizes=ignore_mismatched_sizes, skip_logger_warnings=True) all_loading_infos.append(loading_info) missing_keys = sorted(set.intersection(*[set(info['missing_keys']) for info in all_loading_infos])) unexpected_keys = sum([info['unexpected_keys'] for info in all_loading_infos], []) mismatched_keys = sum([info['mismatched_keys'] for info in all_loading_infos], []) _log_key_warnings(missing_keys, unexpected_keys, 
mismatched_keys, class_name=tf_model.__class__.__name__) if output_loading_info: loading_info = {'missing_keys': missing_keys, 'unexpected_keys': unexpected_keys, 'mismatched_keys': mismatched_keys} return (tf_model, loading_info) return tf_model def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False, output_loading_info=False): try: import tensorflow as tf import torch except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.') raise import transformers from .modeling_tf_utils import load_tf_weights logger.info(f'Loading TensorFlow weights from {tf_checkpoint_path}') tf_model_class_name = 'TF' + pt_model.__class__.__name__ tf_model_class = getattr(transformers, tf_model_class_name) tf_model = tf_model_class(pt_model.config) if tf_inputs is None: tf_inputs = tf_model.dummy_inputs if tf_inputs is not None: tf_model(tf_inputs, training=False) load_tf_weights(tf_model, tf_checkpoint_path) return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info) def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False, output_loading_info=False): weights = tf_model.weights return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info) def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False, output_loading_info=False): try: import tensorflow as tf import torch except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.') raise tf_state_dict = {tf_weight.name: tf_weight.numpy() for tf_weight in tf_weights} return load_tf2_state_dict_in_pytorch_model(pt_model, tf_state_dict, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info) def load_tf2_state_dict_in_pytorch_model(pt_model, tf_state_dict, allow_missing_keys=False, output_loading_info=False): import torch new_pt_params_dict = {} current_pt_params_dict = dict(pt_model.named_parameters()) start_prefix_to_remove = '' if not any((s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys())): start_prefix_to_remove = pt_model.base_model_prefix + '.' 
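# Descriptive note (added comments, not in the original source): the code below mirrors the
# TF->PT direction. It first builds `tf_weights_map`, mapping each PyTorch-style name to the
# corresponding TF weight and its `TransposeType` (via `convert_tf_weight_name_to_pt_weight_name`).
# It then fills every PyTorch parameter: tied parameters are reused through the `data_ptr()`
# cache, weight-norm parametrizations (`parametrizations.original0/1`) are remapped to `_g`/`_v`
# names, each TF value is transposed back with `apply_transpose(..., pt_to_tf=False)` and
# converted to a torch tensor, and the result is loaded with `pt_model.load_state_dict(...,
# strict=False)` before the usual missing/unexpected key warnings are logged.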
tf_weights_map = {} for (name, tf_weight) in tf_state_dict.items(): (pt_name, transpose) = convert_tf_weight_name_to_pt_weight_name(name, start_prefix_to_remove=start_prefix_to_remove, tf_weight_shape=tf_weight.shape) tf_weights_map[pt_name] = (tf_weight, transpose) all_tf_weights = set(tf_weights_map.keys()) loaded_pt_weights_data_ptr = {} missing_keys_pt = [] for (pt_weight_name, pt_weight) in current_pt_params_dict.items(): if pt_weight.data_ptr() in loaded_pt_weights_data_ptr: new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()] continue pt_weight_name_to_check = pt_weight_name key_components = pt_weight_name.split('.') name = None if key_components[-3::2] == ['parametrizations', 'original0']: name = key_components[-2] + '_g' elif key_components[-3::2] == ['parametrizations', 'original1']: name = key_components[-2] + '_v' if name is not None: key_components = key_components[:-3] + [name] pt_weight_name_to_check = '.'.join(key_components) if pt_weight_name_to_check not in tf_weights_map: if allow_missing_keys: missing_keys_pt.append(pt_weight_name) continue raise AttributeError(f'{pt_weight_name} not found in TF 2.0 model') (array, transpose) = tf_weights_map[pt_weight_name_to_check] array = apply_transpose(transpose, array, pt_weight.shape, pt_to_tf=False) if numpy.isscalar(array): array = numpy.array(array) if not is_torch_tensor(array) and (not is_numpy_array(array)): array = array.numpy() if is_numpy_array(array): array = torch.from_numpy(array) new_pt_params_dict[pt_weight_name] = array loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = array all_tf_weights.discard(pt_weight_name) (missing_keys, unexpected_keys) = pt_model.load_state_dict(new_pt_params_dict, strict=False) missing_keys += missing_keys_pt if pt_model._keys_to_ignore_on_load_missing is not None: for pat in pt_model._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if pt_model._keys_to_ignore_on_load_unexpected is not None: for pat in pt_model._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning(f'Some weights of the TF 2.0 model were not used when initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a TFBertForPreTraining model).\n- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model that you expect to be exactly identical (e.g. 
initializing a BertForSequenceClassification model from a TFBertForSequenceClassification model).') else: logger.warning(f'All TF 2.0 model weights were used when initializing {pt_model.__class__.__name__}.\n') if len(missing_keys) > 0: logger.warning(f'Some weights of {pt_model.__class__.__name__} were not initialized from the TF 2.0 model and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') else: logger.warning(f'All the weights of {pt_model.__class__.__name__} were initialized from the TF 2.0 model.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use {pt_model.__class__.__name__} for predictions without further training.') logger.info(f'Weights or buffers not loaded from TF 2.0 model: {all_tf_weights}') if output_loading_info: loading_info = {'missing_keys': missing_keys, 'unexpected_keys': unexpected_keys} return (pt_model, loading_info) return pt_model # File: transformers-main/src/transformers/modeling_tf_utils.py """""" from __future__ import annotations import functools import gc import inspect import json import os import pickle import re import warnings from collections.abc import Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union import h5py import numpy as np import tensorflow as tf from packaging.version import parse from . import DataCollatorWithPadding, DefaultDataCollator from .activations_tf import get_tf_activation from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save from .generation import GenerationConfig, TFGenerationMixin from .tf_utils import convert_batch_encoding, expand_1d, load_attributes_from_hdf5_group, save_attributes_to_hdf5_group, shape_list from .utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ModelOutput, PushToHubMixin, cached_file, download_url, find_labels, has_file, is_offline_mode, is_remote_url, is_safetensors_available, is_tf_symbolic_tensor, logging, requires_backends, working_or_temp_dir from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files if is_safetensors_available(): from safetensors import safe_open from safetensors.tensorflow import save_file as safe_save_file if TYPE_CHECKING: from . import PreTrainedTokenizerBase logger = logging.get_logger(__name__) if 'TF_USE_LEGACY_KERAS' not in os.environ: os.environ['TF_USE_LEGACY_KERAS'] = '1' elif os.environ['TF_USE_LEGACY_KERAS'] != '1': logger.warning('Transformers is only compatible with Keras 2, but you have explicitly set `TF_USE_LEGACY_KERAS` to `0`. This may result in unexpected behaviour or errors if Keras 3 objects are passed to Transformers models.') try: import tf_keras as keras from tf_keras import backend as K except (ModuleNotFoundError, ImportError): import keras from keras import backend as K if parse(keras.__version__).major > 2: raise ValueError('Your currently installed version of Keras is Keras 3, but this is not yet supported in Transformers. 
Please install the backwards-compatible tf-keras package with `pip install tf-keras`.') tf_logger = tf.get_logger() TFModelInputType = Union[List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], tf.Tensor, np.ndarray] def dummy_loss(y_true, y_pred): if y_pred.shape.rank <= 1: return y_pred else: reduction_axes = list(range(1, y_pred.shape.rank)) return tf.reduce_mean(y_pred, axis=reduction_axes) class TFModelUtilsMixin: def num_parameters(self, only_trainable: bool=False) -> int: if only_trainable: return int(sum((np.prod(w.shape.as_list()) for w in self.trainable_variables))) else: return self.count_params() def keras_serializable(cls): initializer = cls.__init__ config_class = getattr(cls, 'config_class', None) if config_class is None: raise AttributeError('Must set `config_class` to use @keras_serializable') @functools.wraps(initializer) def wrapped_init(self, *args, **kwargs): config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop('config', None) if isinstance(config, dict): config = config_class.from_dict(config) initializer(self, config, *args, **kwargs) elif isinstance(config, PretrainedConfig): if len(args) > 0: initializer(self, *args, **kwargs) else: initializer(self, config, *args, **kwargs) else: raise ValueError('Must pass either `config` (PretrainedConfig) or `config` (dict)') self._config = config self._kwargs = kwargs cls.__init__ = wrapped_init if not hasattr(cls, 'get_config'): raise TypeError('Only use @keras_serializable on keras.layers.Layer subclasses') if hasattr(cls.get_config, '_is_default'): def get_config(self): cfg = super(cls, self).get_config() cfg['config'] = self._config.to_dict() cfg.update(self._kwargs) return cfg cls.get_config = get_config cls._keras_serializable = True if hasattr(keras.utils, 'register_keras_serializable'): cls = keras.utils.register_keras_serializable()(cls) return cls class TFCausalLanguageModelingLoss: def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if self.config.tf_legacy_loss: active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) unmasked_loss = loss_fn(tf.nn.relu(labels), logits) loss_mask = tf.cast(labels != -100, dtype=unmasked_loss.dtype) masked_loss = unmasked_loss * loss_mask reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) return tf.reshape(reduced_masked_loss, (1,)) class TFQuestionAnsweringLoss: def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) start_loss = loss_fn(labels['start_position'], logits[0]) end_loss = loss_fn(labels['end_position'], logits[1]) return (start_loss + end_loss) / 2.0 class TFTokenClassificationLoss: def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if tf.executing_eagerly(): if tf.math.reduce_any(labels == -1): tf.print('Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.') if self.config.tf_legacy_loss: if tf.math.reduce_any(labels == -1): tf.print('Using `-1` to mask the loss for the token is deprecated. 
Please use `-100` instead.') active_loss = tf.reshape(labels, (-1,)) != -1 else: active_loss = tf.reshape(labels, (-1,)) != -100 reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss) labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss) return loss_fn(labels, reduced_logits) unmasked_loss = loss_fn(tf.nn.relu(labels), logits) loss_mask = tf.cast(labels >= 0, dtype=unmasked_loss.dtype) masked_loss = unmasked_loss * loss_mask reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(loss_mask) return tf.reshape(reduced_masked_loss, (1,)) class TFSequenceClassificationLoss: def hf_compute_loss(self, labels, logits): if logits.shape.rank == 1 or logits.shape[1] == 1: loss_fn = keras.losses.MeanSquaredError(reduction=keras.losses.Reduction.NONE) if labels.shape.rank == 1: labels = tf.expand_dims(labels, axis=-1) else: loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) return loss_fn(labels, logits) class TFMultipleChoiceLoss: def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) return loss_fn(labels, logits) class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss): class TFNextSentencePredictionLoss: def hf_compute_loss(self, labels, logits): loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if self.config.tf_legacy_loss: next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100) next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss) next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss) return loss_fn(next_sentence_label, next_sentence_reduced_logits) unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels), y_pred=logits) ns_loss_mask = tf.cast(labels != -100, dtype=unmasked_ns_loss.dtype) masked_ns_loss = unmasked_ns_loss * ns_loss_mask return masked_ns_loss def booleans_processing(config, **kwargs): final_booleans = {} if 'output_attentions' in kwargs: final_booleans['output_attentions'] = kwargs['output_attentions'] if kwargs['output_attentions'] is not None else config.output_attentions final_booleans['output_hidden_states'] = kwargs['output_hidden_states'] if kwargs['output_hidden_states'] is not None else config.output_hidden_states final_booleans['return_dict'] = kwargs['return_dict'] if kwargs['return_dict'] is not None else config.return_dict if 'use_cache' in kwargs: final_booleans['use_cache'] = kwargs['use_cache'] if kwargs['use_cache'] is not None else getattr(config, 'use_cache', None) return final_booleans def unpack_inputs(func): original_signature = inspect.signature(func) @functools.wraps(func) def run_call_with_unpacked_inputs(self, *args, **kwargs): kwargs_call = {key: val for (key, val) in kwargs.items() if key not in dict(original_signature.parameters)} fn_args_and_kwargs = {key: val for (key, val) in kwargs.items() if key not in kwargs_call} fn_args_and_kwargs.update({'kwargs_call': kwargs_call}) fn_args_and_kwargs.update(dict(zip(func.__code__.co_varnames[1:], args))) if 'EncoderDecoder' in self.__class__.__name__: config = None else: config = self.config unpacked_inputs = input_processing(func, config, **fn_args_and_kwargs) return func(self, **unpacked_inputs) run_call_with_unpacked_inputs.__signature__ = original_signature return run_call_with_unpacked_inputs def input_processing(func, 
config, **kwargs): signature = dict(inspect.signature(func).parameters) has_kwargs = bool(signature.pop('kwargs', None)) signature.pop('self', None) parameter_names = list(signature.keys()) main_input_name = parameter_names[0] main_input = kwargs.pop(main_input_name, None) output = {} allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray) if 'inputs' in kwargs['kwargs_call']: warnings.warn('The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.', FutureWarning) output['input_ids'] = kwargs['kwargs_call'].pop('inputs') if 'decoder_cached_states' in kwargs['kwargs_call']: warnings.warn('The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.', FutureWarning) output['past_key_values'] = kwargs['kwargs_call'].pop('decoder_cached_states') if 'past' in kwargs['kwargs_call'] and 'past_key_values' in parameter_names: warnings.warn('The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.', FutureWarning) kwargs['past_key_values'] = kwargs['kwargs_call'].pop('past') elif 'past_key_values' in kwargs['kwargs_call'] and 'past' in parameter_names: kwargs['past'] = kwargs['kwargs_call'].pop('past_key_values') if has_kwargs: output['kwargs'] = kwargs.pop('kwargs_call', {}) else: if len(kwargs['kwargs_call']) > 0: raise ValueError(f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}.") kwargs.pop('kwargs_call') for (k, v) in kwargs.items(): if isinstance(v, allowed_types) or tf.is_tensor(v) or v is None: output[k] = v else: raise ValueError(f'Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.') if isinstance(main_input, (tuple, list)): for (i, input) in enumerate(main_input): if is_tf_symbolic_tensor(input): tensor_name = input.name.split(':')[0] if tensor_name in parameter_names: output[tensor_name] = input else: output[parameter_names[i]] = input elif isinstance(input, allowed_types) or input is None: output[parameter_names[i]] = input else: raise ValueError(f'Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}.') elif isinstance(main_input, Mapping): if 'inputs' in main_input: warnings.warn('The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.', FutureWarning) output['input_ids'] = main_input.pop('inputs') if 'decoder_cached_states' in main_input: warnings.warn('The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.', FutureWarning) output['past_key_values'] = main_input.pop('decoder_cached_states') for (k, v) in dict(main_input).items(): if isinstance(v, allowed_types) or v is None: output[k] = v elif k not in parameter_names and 'args' not in parameter_names: logger.warning(f'The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored.') continue else: raise ValueError(f'Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.') elif tf.is_tensor(main_input) or main_input is None: output[main_input_name] = main_input else: raise ValueError(f'Data of type {type(main_input)} is not allowed only {allowed_types} is accepted for {main_input_name}.') for name in parameter_names: if name not in list(output.keys()) and name != 'args': output[name] = kwargs.pop(name, signature[name].default) if 'args' in output: if 
output['args'] is not None and is_tf_symbolic_tensor(output['args']): tensor_name = output['args'].name.split(':')[0] output[tensor_name] = output['args'] else: output['input_ids'] = output['args'] del output['args'] if 'kwargs' in output: del output['kwargs'] cast_output = {} for (key, val) in output.items(): if isinstance(val, tf.Tensor) and val.dtype == tf.int64: cast_output[key] = tf.cast(val, tf.int32) elif isinstance(val, np.ndarray) and val.dtype == np.int64: cast_output[key] = val.astype(np.int32) else: cast_output[key] = val output = cast_output del cast_output if config is not None: boolean_dict = {k: v for (k, v) in output.items() if k in ['return_dict', 'output_attentions', 'output_hidden_states', 'use_cache']} output.update(booleans_processing(config=config, **boolean_dict)) return output def dtype_byte_size(dtype): if dtype == tf.bool: return 1 / 8 bit_search = re.search('[^\\d](\\d+)$', dtype.name) if bit_search is None: raise ValueError(f'`dtype` is not a valid dtype: {dtype}.') bit_size = int(bit_search.groups()[0]) return bit_size // 8 def strip_model_name_and_prefix(name, _prefix=None): if _prefix is not None and name.startswith(_prefix): name = name[len(_prefix):] if name.startswith('/'): name = name[1:] if 'model.' not in name and len(name.split('/')) > 1: name = '/'.join(name.split('/')[1:]) return name def tf_shard_checkpoint(weights, max_shard_size='10GB', weights_name: str=TF2_WEIGHTS_NAME): max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [] current_block = [] current_block_size = 0 total_size = 0 for item in weights: weight_size = item.numpy().size * dtype_byte_size(item.dtype) if current_block_size + weight_size > max_shard_size: sharded_state_dicts.append(current_block) current_block = [] current_block_size = 0 current_block.append(item) current_block_size += weight_size total_size += weight_size sharded_state_dicts.append(current_block) if len(sharded_state_dicts) == 1: return ({weights_name: sharded_state_dicts[0]}, None) weight_map = {} shards = {} for (idx, shard) in enumerate(sharded_state_dicts): shard_file = weights_name.replace('.h5', f'-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.h5') shard_file = shard_file.replace('.safetensors', f'-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors') shards[shard_file] = shard for weight in shard: weight_name = weight.name weight_map[weight_name] = shard_file metadata = {'total_size': total_size} index = {'metadata': metadata, 'weight_map': weight_map} return (shards, index) def load_tf_sharded_weights(model, shard_files, ignore_mismatched_sizes=False, strict=False, _prefix=None): unexpected_keys = set() saved_keys = set() mismatched_keys = set() model_keys = set() model_layer_map = {} for (i, k) in enumerate(model.weights): layer_name = k.name if _prefix is not None and layer_name.startswith(_prefix): layer_name = layer_name[len(_prefix):] layer_name = layer_name.lstrip('/') if not ('model.' 
in layer_name or len(layer_name.split('/')) == 1): layer_name = '/'.join(layer_name.split('/')[1:]) model_keys.add(layer_name) model_layer_map[layer_name] = i for shard_file in shard_files: (saved_weight_names_set, unexpected_keys_set, mismatched_keys_set) = load_tf_shard(model, model_layer_map, shard_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix) saved_keys.update(saved_weight_names_set) unexpected_keys.update(unexpected_keys_set) mismatched_keys.update(mismatched_keys_set) gc.collect() missing_keys = model_keys - saved_keys if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): error_message = f'Error(s) in loading state_dict for {model.__class__.__name__}' if len(missing_keys) > 0: str_missing_keys = ','.join([f'"{k}"' for k in missing_keys]) error_message += f'\nMissing key(s): {str_missing_keys}.' if len(unexpected_keys) > 0: str_unexpected_keys = ','.join([f'"{k}"' for k in unexpected_keys]) error_message += f'\nUnexpected key(s): {str_unexpected_keys}.' raise RuntimeError(error_message) return (missing_keys, unexpected_keys, mismatched_keys) def load_tf_shard(model, model_layer_map, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): saved_weight_names_set = set() saved_weights = {} mismatched_keys = set() unexpected_keys = set() try: with h5py.File(resolved_archive_file, 'r') as sharded_checkpoint_file: saved_h5_model_layers_name = set(load_attributes_from_hdf5_group(sharded_checkpoint_file, 'layer_names')) weight_value_tuples = [] for layer_name in saved_h5_model_layers_name: h5_layer_object = sharded_checkpoint_file[layer_name] saved_weights[layer_name] = np.asarray(h5_layer_object) saved_weight_names_set.add(layer_name) if layer_name not in model_layer_map: unexpected_keys.add(layer_name) else: symbolic_weight = model.weights[model_layer_map[layer_name]] saved_weight_value = saved_weights[layer_name] if saved_weight_value is not None: if K.int_shape(symbolic_weight) != saved_weight_value.shape: try: array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) except ValueError as e: if ignore_mismatched_sizes: mismatched_keys.add((layer_name, saved_weight_value.shape, K.int_shape(symbolic_weight))) continue else: raise e else: array = saved_weight_value weight_value_tuples.append((symbolic_weight, array)) K.batch_set_value(weight_value_tuples) return (saved_weight_names_set, unexpected_keys, mismatched_keys) except Exception as e: try: with open(resolved_archive_file) as f: if f.read().startswith('version'): raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.') else: raise ValueError(f'Unable to locate the file {resolved_archive_file} which is necessary to load this pretrained model. Make sure you have saved the model properly.') from e except (UnicodeDecodeError, ValueError): raise OSError(f"Unable to load weights from TF checkpoint file for '{resolved_archive_file}' at '{resolved_archive_file}'. If you tried to load a TF model from a sharded checkpoint, you should try converting the model by loading it in pytorch and saving it locally. A conversion script should be released soon.")
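# `load_tf_sharded_weights` (above) and `load_tf_sharded_weights_from_safetensors` (below) both
# return a `(missing_keys, unexpected_keys, mismatched_keys)` triple and raise a RuntimeError when
# `strict=True` and keys are missing or unexpected. Illustrative sketch only (the shard filenames
# are hypothetical and not part of this module):
#
#     missing, unexpected, mismatched = load_tf_sharded_weights(
#         model, ["tf_model-00001-of-00002.h5", "tf_model-00002-of-00002.h5"]
#     )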
def load_tf_sharded_weights_from_safetensors(model, shard_files, ignore_mismatched_sizes=False, strict=False, _prefix=None): unexpected_keys = set() all_missing_keys = [] mismatched_keys = set() for shard_file in shard_files: (missing_layers, unexpected_layers, mismatched_layers) = load_tf_weights_from_safetensors(model, shard_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix) all_missing_keys.append(set(missing_layers)) unexpected_keys.update(unexpected_layers) mismatched_keys.update(mismatched_layers) gc.collect() missing_keys = set.intersection(*all_missing_keys) if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): error_message = f'Error(s) in loading state_dict for {model.__class__.__name__}' if len(missing_keys) > 0: str_missing_keys = ','.join([f'"{k}"' for k in missing_keys]) error_message += f'\nMissing key(s): {str_missing_keys}.' if len(unexpected_keys) > 0: str_unexpected_keys = ','.join([f'"{k}"' for k in unexpected_keys]) error_message += f'\nUnexpected key(s): {str_unexpected_keys}.' raise RuntimeError(error_message) return (missing_keys, unexpected_keys, mismatched_keys) def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): if resolved_archive_file.endswith('.safetensors'): load_function = load_tf_weights_from_safetensors else: load_function = load_tf_weights_from_h5 return load_function(model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix) def load_tf_weights_from_h5(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): mismatched_layers = [] with h5py.File(resolved_archive_file, 'r') as sharded_checkpoint_file: saved_h5_model_layers_name = set(load_attributes_from_hdf5_group(sharded_checkpoint_file, 'layer_names')) missing_layers = list({layer.name for layer in model.layers} - saved_h5_model_layers_name) unexpected_layers = list(saved_h5_model_layers_name - {layer.name for layer in model.layers}) saved_weight_names_set = set() symbolic_weights_names = set() weight_value_tuples = [] for layer in model.layers: if layer.name in saved_h5_model_layers_name: h5_layer_object = sharded_checkpoint_file[layer.name] symbolic_weights = layer.trainable_weights + layer.non_trainable_weights saved_weights = {} for weight_name in load_attributes_from_hdf5_group(h5_layer_object, 'weight_names'): name = '/'.join(weight_name.split('/')[1:]) if _prefix is not None: name = _prefix + '/' + name saved_weights[name] = np.asarray(h5_layer_object[weight_name]) saved_weight_names_set.add(name) for symbolic_weight in symbolic_weights: if _prefix is not None: delimeter = len(_prefix.split('/')) symbolic_weight_name = '/'.join(symbolic_weight.name.split('/')[:delimeter] + symbolic_weight.name.split('/')[delimeter + 1:]) else: symbolic_weight_name = '/'.join(symbolic_weight.name.split('/')[1:]) saved_weight_value = saved_weights.get(symbolic_weight_name, None) if saved_weight_value is None and symbolic_weight_name.endswith('embeddings:0'): symbolic_weight_name = symbolic_weight_name[:-12] + 'weight:0' saved_weight_value = saved_weights.get(symbolic_weight_name, None) symbolic_weights_names.add(symbolic_weight_name) if saved_weight_value is not None: if K.int_shape(symbolic_weight) != saved_weight_value.shape: try: array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight)) except ValueError as e: if ignore_mismatched_sizes: mismatched_layers.append((symbolic_weight_name, saved_weight_value.shape, 
K.int_shape(symbolic_weight))) continue else: raise e else: array = saved_weight_value weight_value_tuples.append((symbolic_weight, array)) K.batch_set_value(weight_value_tuples) missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set)) unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names)) return (missing_layers, unexpected_layers, mismatched_layers) def load_tf_weights_from_safetensors(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None): with safe_open(resolved_archive_file, framework='tf') as safetensors_archive: mismatched_layers = [] weight_names = [strip_model_name_and_prefix(w.name, _prefix=_prefix) for w in model.weights] loaded_weight_names = list(safetensors_archive.keys()) missing_layers = list(set(weight_names) - set(loaded_weight_names)) unexpected_layers = list(set(loaded_weight_names) - set(weight_names)) for weight in model.weights: weight_name = strip_model_name_and_prefix(weight.name, _prefix=_prefix) if weight_name in loaded_weight_names: weight_value = safetensors_archive.get_tensor(weight_name) if K.int_shape(weight) != weight_value.shape: try: weight_value = tf.reshape(weight_value, K.int_shape(weight)) except (ValueError, tf.errors.InvalidArgumentError) as e: if ignore_mismatched_sizes: mismatched_layers.append((weight_name, weight_value.shape, K.int_shape(weight))) continue else: raise e K.set_value(weight, weight_value) return (missing_layers, unexpected_layers, mismatched_layers) def init_copy_embeddings(old_embeddings, new_num_tokens): (old_num_tokens, old_embedding_dim) = shape_list(old_embeddings) size_diff = new_num_tokens - old_num_tokens if tf.math.greater(size_diff, 0): current_weights = tf.pad(old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True) mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False) else: current_weights = tf.slice(old_embeddings.value(), tf.convert_to_tensor([0, 0]), tf.convert_to_tensor([new_num_tokens, old_embedding_dim])) mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True) return (mask, current_weights) class TFPreTrainedModel(keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin): config_class = None base_model_prefix = '' main_input_name = 'input_ids' _auto_class = None _using_dummy_loss = None _label_to_output_map = None _keys_to_ignore_on_load_missing = None _keys_to_ignore_on_load_unexpected = None _requires_load_weight_prefix = False @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: dummies = {} for (key, spec) in self.input_signature.items(): dummy_shape = [dim if dim is not None else 2 for dim in spec.shape] if spec.shape[0] is None: dummy_shape[0] = 1 dummies[key] = tf.ones(shape=dummy_shape, dtype=spec.dtype) if key == 'token_type_ids': dummies[key] = tf.zeros_like(dummies[key]) if self.config.add_cross_attention and 'encoder_hidden_states' in inspect.signature(self.call).parameters: if 'encoder_hidden_states' not in dummies: if self.main_input_name == 'input_ids': dummies['encoder_hidden_states'] = tf.ones(shape=(1, 2, self.config.hidden_size), dtype=tf.float32, name='encoder_hidden_states') else: raise NotImplementedError("Model has cross-attention but we couldn't infer the shape for the encoder hidden states. 
Please manually override dummy_inputs!") return dummies def build_in_name_scope(self): with tf.name_scope(self.name): self.build(input_shape=None) @property def framework(self) -> str: return 'tf' def build(self, input_shape=None): pass def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) if not isinstance(config, PretrainedConfig): raise TypeError(f'Parameter config in `{self.__class__.__name__}(config)` should be an instance of class `PretrainedConfig`. To create a model from a pretrained model use `model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`') self.config = config self.name_or_path = config.name_or_path self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None self._set_save_spec(self.input_signature) def get_config(self): return self.config.to_dict() @functools.wraps(keras.Model.fit) def fit(self, *args, **kwargs): (args, kwargs) = convert_batch_encoding(*args, **kwargs) return super().fit(*args, **kwargs) @functools.wraps(keras.Model.train_on_batch) def train_on_batch(self, *args, **kwargs): (args, kwargs) = convert_batch_encoding(*args, **kwargs) return super().train_on_batch(*args, **kwargs) @functools.wraps(keras.Model.test_on_batch) def test_on_batch(self, *args, **kwargs): (args, kwargs) = convert_batch_encoding(*args, **kwargs) return super().test_on_batch(*args, **kwargs) @functools.wraps(keras.Model.predict_on_batch) def predict_on_batch(self, *args, **kwargs): (args, kwargs) = convert_batch_encoding(*args, **kwargs) return super().predict_on_batch(*args, **kwargs) @functools.wraps(keras.Model.predict) def predict(self, *args, **kwargs): (args, kwargs) = convert_batch_encoding(*args, **kwargs) return super().predict(*args, **kwargs) @functools.wraps(keras.Model.evaluate) def evaluate(self, *args, **kwargs): (args, kwargs) = convert_batch_encoding(*args, **kwargs) return super().evaluate(*args, **kwargs) @classmethod def from_config(cls, config, **kwargs): if isinstance(config, PretrainedConfig): return cls._from_config(config, **kwargs) return cls._from_config(cls.config_class.from_dict(config, **kwargs)) @classmethod def _from_config(cls, config, **kwargs): return cls(config, **kwargs) def get_head_mask(self, head_mask: tf.Tensor | None, num_hidden_layers: int) -> tf.Tensor: if head_mask is not None: head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) else: head_mask = [None] * num_hidden_layers return head_mask def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): if head_mask.shape.rank == 1: head_mask = head_mask[None, None, :, None, None] head_mask = tf.repeat(head_mask, repeats=num_hidden_layers, axis=0) elif head_mask.shape.rank == 2: head_mask = head_mask[:, None, :, None, None] assert head_mask.shape.rank == 5, f'head_mask.dim != 5, instead {head_mask.dim()}' head_mask = tf.cast(head_mask, tf.float32) return head_mask @tf.function def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) @property def input_signature(self) -> Dict[str, tf.TensorSpec]: model_inputs = list(inspect.signature(self.call).parameters) sig = {} if 'input_ids' in model_inputs: if self.__class__.__name__.endswith('ForMultipleChoice'): text_dims = 3 else: text_dims = 2 for input_name in ('input_ids', 'attention_mask', 'token_type_ids', 'decoder_input_ids', 'decoder_attention_mask'): if input_name in model_inputs: sig[input_name] = tf.TensorSpec([None] * text_dims, tf.int32, name=input_name) if 'pixel_values' in model_inputs: 
pixel_values_shape = [None, None, None, None] if hasattr(self.config, 'vision_config'): vision_config = self.config.vision_config else: vision_config = self.config if hasattr(vision_config, 'num_channels'): pixel_values_shape[1] = vision_config.num_channels else: raise NotImplementedError('Could not infer number of channels from config, please override input_signature to specify input shapes.') if hasattr(vision_config, 'image_size'): pixel_values_shape[2] = pixel_values_shape[3] = vision_config.image_size elif hasattr(vision_config, 'input_size'): pixel_values_shape[2] = pixel_values_shape[3] = vision_config.input_size else: raise NotImplementedError('Could not infer input image shape from config, please override input_signature to specify input shapes.') sig['pixel_values'] = tf.TensorSpec(pixel_values_shape, tf.float32, name='pixel_values') if 'input_features' in model_inputs: raise NotImplementedError('Audio models need a manually defined input_signature') return sig def serving_output(self, output): if not isinstance(output, ModelOutput): return output for key in output: if key.endswith('hidden_states') and (not getattr(self.config, 'output_hidden_states', False)): output[key] = None elif key.endswith('attentions') and (not getattr(self.config, 'output_attentions', False)): output[key] = None elif key == 'past_key_values' and (not getattr(self.config, 'use_cache', False)): output[key] = None elif key == 'cross_attentions' and (not (getattr(self.config, 'output_attentions', False) and getattr(self.config, 'add_cross_attention', False))): output[key] = None if isinstance(output[key], (tuple, list)): try: output[key] = tf.convert_to_tensor(output[key]) except (ValueError, tf.errors.InvalidArgumentError): pass return output @classmethod def can_generate(cls) -> bool: if 'GenerationMixin' in str(cls.prepare_inputs_for_generation) and 'GenerationMixin' in str(cls.generate): return False return True def get_input_embeddings(self) -> keras.layers.Layer: main_layer = getattr(self, self.base_model_prefix, self) if main_layer is not self: return main_layer.get_input_embeddings() else: raise NotImplementedError def _save_checkpoint(self, checkpoint_dir, epoch): if not os.path.isdir(checkpoint_dir): os.mkdir(checkpoint_dir) weights_path = os.path.join(checkpoint_dir, 'weights.h5') self.save_weights(weights_path) extra_data = {'epoch': epoch, 'optimizer_state': self.optimizer.get_weights()} extra_data_path = os.path.join(checkpoint_dir, 'extra_data.pickle') with open(extra_data_path, 'wb') as f: pickle.dump(extra_data, f) def prepare_tf_dataset(self, dataset: 'datasets.Dataset', batch_size: int=8, shuffle: bool=True, tokenizer: Optional['PreTrainedTokenizerBase']=None, collate_fn: Optional[Callable]=None, collate_fn_args: Optional[Dict[str, Any]]=None, drop_remainder: Optional[bool]=None, prefetch: bool=True): requires_backends(self, ['datasets']) import datasets if collate_fn is None: if tokenizer is None: collate_fn = DefaultDataCollator(return_tensors='np') else: collate_fn = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors='np') if collate_fn_args is None: collate_fn_args = {} if not isinstance(dataset, datasets.Dataset): raise TypeError('Dataset argument should be a datasets.Dataset!') model_inputs = list(inspect.signature(self.call).parameters) model_labels = find_labels(self.__class__) if 'cols_to_retain' in list(inspect.signature(dataset._get_output_signature).parameters.keys()): (output_signature, _) = dataset._get_output_signature(dataset, batch_size=None, 
collate_fn=collate_fn, collate_fn_args=collate_fn_args, cols_to_retain=model_inputs) else: unwanted_columns = [feature for feature in dataset.features if feature not in model_inputs and feature not in ('label_ids', 'label')] dataset = dataset.remove_columns(unwanted_columns) (output_signature, _) = dataset._get_output_signature(dataset, batch_size=None, collate_fn=collate_fn, collate_fn_args=collate_fn_args) output_columns = list(output_signature.keys()) feature_cols = [col for col in output_columns if col in model_inputs and col not in model_labels] label_cols = [col for col in output_columns if col in model_labels] feature_cols = feature_cols[0] if len(feature_cols) == 1 else feature_cols label_cols = label_cols[0] if len(label_cols) == 1 else label_cols if drop_remainder is None: drop_remainder = shuffle tf_dataset = dataset.to_tf_dataset(columns=feature_cols, label_cols=label_cols, batch_size=batch_size, shuffle=shuffle, drop_remainder=drop_remainder, collate_fn=collate_fn, collate_fn_args=collate_fn_args, prefetch=prefetch) return tf_dataset def compile(self, optimizer='rmsprop', loss='auto_with_warning', metrics=None, loss_weights=None, weighted_metrics=None, run_eagerly=None, steps_per_execution=None, **kwargs): if loss in ('auto_with_warning', 'passthrough'): logger.info("No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! To disable this behaviour please pass a loss argument, or explicitly pass `loss=None` if you do not want your model to compute a loss. You can also specify `loss='auto'` to get the internal loss without printing this info string.") loss = 'auto' if loss == 'auto': loss = dummy_loss self._using_dummy_loss = True else: self._using_dummy_loss = False parent_args = list(inspect.signature(keras.Model.compile).parameters.keys()) if 'steps_per_execution' in parent_args: super().compile(optimizer=optimizer, loss=loss, metrics=metrics, loss_weights=loss_weights, weighted_metrics=weighted_metrics, run_eagerly=run_eagerly, steps_per_execution=steps_per_execution, **kwargs) else: super().compile(optimizer=optimizer, loss=loss, metrics=metrics, loss_weights=loss_weights, weighted_metrics=weighted_metrics, run_eagerly=run_eagerly, experimental_steps_per_execution=steps_per_execution, **kwargs) def compute_loss(self, *args, **kwargs): if hasattr(keras.Model, 'compute_loss'): return super().compute_loss(*args, **kwargs) else: warnings.warn('The old compute_loss method is deprecated as it conflicts with the Keras compute_loss method added in TF 2.8. If you want the original HF compute_loss, please call hf_compute_loss() instead. 
From TF versions >= 2.8, or Transformers versions >= 5, calling compute_loss() will get the Keras method instead.', FutureWarning) return self.hf_compute_loss(*args, **kwargs) def get_label_to_output_name_mapping(self): arg_names = list(inspect.signature(self.call).parameters) if self._label_to_output_map is not None: return self._label_to_output_map elif 'start_positions' in arg_names: return {'start_positions': 'start_logits', 'end_positions': 'end_logits'} elif 'sentence_order_label' in arg_names: return {'labels': 'prediction_logits', 'sentence_order_label': 'sop_logits'} elif 'next_sentence_label' in arg_names: return {'labels': 'prediction_logits', 'next_sentence_label': 'seq_relationship_logits'} elif 'mc_labels' in arg_names: return {'labels': 'logits', 'mc_labels': 'mc_logits'} else: return {} def train_step(self, data): arg_names = list(inspect.signature(self.call).parameters) label_kwargs = find_labels(self.__class__) label_to_output = self.get_label_to_output_name_mapping() output_to_label = {val: key for (key, val) in label_to_output.items()} if not self._using_dummy_loss and parse(tf.__version__) < parse('2.11.0'): data = expand_1d(data) (x, y, sample_weight) = keras.utils.unpack_x_y_sample_weight(data) if isinstance(x, dict): x = x.copy() if isinstance(y, dict): y = y.copy() if self._using_dummy_loss and y is not None: if len(label_kwargs) == 1 and isinstance(y, tf.Tensor): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} label_kwarg = next(iter(label_kwargs)) if label_kwarg not in x: x[label_kwarg] = y elif isinstance(y, dict): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} for (key, val) in y.items(): if key in arg_names and key not in x: x[key] = val elif output_to_label.get(key, None) in arg_names and key not in x: x[output_to_label[key]] = val if y is None: y = {key: val for (key, val) in x.items() if key in label_kwargs} if not y and (not self._using_dummy_loss): raise ValueError('Could not find label column(s) in input dict and no separate labels were provided!') if isinstance(y, dict): y = {label_to_output.get(key, key): val for (key, val) in y.items()} with tf.GradientTape() as tape: if self._using_dummy_loss and 'return_loss' in arg_names: y_pred = self(x, training=True, return_loss=True) else: y_pred = self(x, training=True) if self._using_dummy_loss: loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses) else: loss = None if isinstance(y, dict) and len(y) == 1: if list(y.keys())[0] in y_pred.keys(): y_pred = y_pred[list(y.keys())[0]] elif list(y_pred.keys())[0] == 'loss': y_pred = y_pred[1] else: y_pred = y_pred[0] (_, y) = y.popitem() elif isinstance(y, dict): y_pred = {key: val for (key, val) in y_pred.items() if key in y} elif isinstance(y, tuple) or isinstance(y, list): if list(y_pred.keys())[0] == 'loss': y_pred = y_pred.to_tuple()[1:] else: y_pred = y_pred.to_tuple() y_pred = y_pred[:len(y)] elif list(y_pred.keys())[0] == 'loss': y_pred = y_pred[1] else: y_pred = y_pred[0] if loss is None: loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) self.optimizer.minimize(loss, self.trainable_variables, tape=tape) self.compiled_metrics.update_state(y, y_pred, sample_weight) return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics def test_step(self, data): arg_names = list(inspect.signature(self.call).parameters) label_kwargs = 
find_labels(self.__class__) label_to_output = self.get_label_to_output_name_mapping() output_to_label = {val: key for (key, val) in label_to_output.items()} if not self._using_dummy_loss and parse(tf.__version__) < parse('2.11.0'): data = expand_1d(data) (x, y, sample_weight) = keras.utils.unpack_x_y_sample_weight(data) if isinstance(x, dict): x = x.copy() if isinstance(y, dict): y = y.copy() if self._using_dummy_loss and y is not None: arg_names = list(inspect.signature(self.call).parameters) if len(label_kwargs) == 1 and isinstance(y, tf.Tensor): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} label_kwarg = next(iter(label_kwargs)) if label_kwarg not in x: x[label_kwarg] = y elif isinstance(y, dict): if isinstance(x, tf.Tensor): x = {arg_names[0]: x} for (key, val) in y.items(): if key in arg_names and key not in x: x[key] = val elif output_to_label.get(key, None) in arg_names and key not in x: x[output_to_label[key]] = val if y is None: y = {key: val for (key, val) in x.items() if key in label_kwargs} if not y and (not self._using_dummy_loss): raise ValueError('Could not find label column(s) in input dict and no separate labels were provided!') if isinstance(y, dict): y = {label_to_output.get(key, key): val for (key, val) in y.items()} if self._using_dummy_loss and 'return_loss' in arg_names: y_pred = self(x, return_loss=True, training=False) else: y_pred = self(x, training=False) if self._using_dummy_loss: loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses) else: loss = None if isinstance(y, dict) and len(y) == 1: if list(y.keys())[0] in y_pred.keys(): y_pred = y_pred[list(y.keys())[0]] elif list(y_pred.keys())[0] == 'loss': y_pred = y_pred[1] else: y_pred = y_pred[0] (_, y) = y.popitem() elif isinstance(y, dict): y_pred = {key: val for (key, val) in y_pred.items() if key in y} elif isinstance(y, tuple) or isinstance(y, list): if list(y_pred.keys())[0] == 'loss': y_pred = y_pred.to_tuple()[1:] else: y_pred = y_pred.to_tuple() y_pred = y_pred[:len(y)] elif list(y_pred.keys())[0] == 'loss': y_pred = y_pred[1] else: y_pred = y_pred[0] if loss is None: loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses) self.compiled_metrics.update_state(y, y_pred, sample_weight) return_metrics = {} for metric in self.metrics: result = metric.result() if isinstance(result, dict): return_metrics.update(result) else: return_metrics[metric.name] = result return return_metrics def create_model_card(self, output_dir, model_name: str, language: Optional[str]=None, license: Optional[str]=None, tags: Optional[str]=None, finetuned_from: Optional[str]=None, tasks: Optional[str]=None, dataset_tags: Optional[Union[str, List[str]]]=None, dataset: Optional[Union[str, List[str]]]=None, dataset_args: Optional[Union[str, List[str]]]=None): from .modelcard import TrainingSummary training_summary = TrainingSummary.from_keras(self, keras_history=self.history, language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args) model_card = training_summary.to_model_card() with open(os.path.join(output_dir, 'README.md'), 'w') as f: f.write(model_card) def set_input_embeddings(self, value): main_layer = getattr(self, self.base_model_prefix) if main_layer is None: raise NotImplementedError('The model does not implements the base_model_prefix attribute.') try: main_layer.set_input_embeddings(value) except AttributeError: 
logger.info('Building the model') self.build_in_name_scope() main_layer.set_input_embeddings(value) def get_output_embeddings(self) -> Union[None, keras.layers.Layer]: if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_output_embeddings() except AttributeError: logger.info('Building the model') self.build_in_name_scope() return lm_head().get_output_embeddings() return None def set_output_embeddings(self, value): if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_output_embeddings(value) except AttributeError: logger.info('Building the model') self.build_in_name_scope() lm_head.set_output_embeddings(value) def get_output_layer_with_bias(self) -> Union[None, keras.layers.Layer]: warnings.warn('The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.', FutureWarning) return self.get_lm_head() def get_prefix_bias_name(self) -> Union[None, str]: warnings.warn('The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.', FutureWarning) return None def get_bias(self) -> Union[None, Dict[str, tf.Variable]]: if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: return lm_head.get_bias() except AttributeError: self.build_in_name_scope() return lm_head.get_bias() return None def set_bias(self, value): if self.get_lm_head() is not None: lm_head = self.get_lm_head() try: lm_head.set_bias(value) except AttributeError: self.build_in_name_scope() lm_head.set_bias(value) def get_lm_head(self) -> keras.layers.Layer: return None def resize_token_embeddings(self, new_num_tokens: Optional[int]=None) -> Union[keras.layers.Embedding, tf.Variable]: if isinstance(self.get_input_embeddings(), keras.layers.Embedding): return self._v2_resized_token_embeddings(new_num_tokens) if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self._get_word_embedding_weight(self.get_input_embeddings()) model_embeds = self._resize_token_embeddings(new_num_tokens) self.config.vocab_size = new_num_tokens return model_embeds def _v2_resized_token_embeddings(self, new_num_tokens: Optional[int]=None) -> keras.layers.Embedding: if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self.get_input_embeddings() model_embeds = self._v2_resize_token_embeddings(new_num_tokens) self.config.vocab_size = new_num_tokens return model_embeds def _get_word_embedding_weight(model, embedding_layer): if isinstance(embedding_layer, tf.Tensor): return embedding_layer embeds = getattr(embedding_layer, 'weight', None) if embeds is not None: return embeds embeds = getattr(embedding_layer, 'decoder', None) if embeds is not None: return embeds model.build_in_name_scope() embeds = getattr(embedding_layer, 'weight', None) if embeds is not None: return embeds embeds = getattr(embedding_layer, 'decoder', None) if embeds is not None: return embeds return None def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings()) new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) if self.get_bias() is not None: old_lm_head_bias = self.get_bias() new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) if self.get_output_embeddings() is not None: old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) 
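# The remainder of the resize path pushes the resized weights back onto the model: the legacy
# `_resize_token_embeddings` sets the new decoder/input embeddings it just built, while the
# `_v2_*` helpers below operate on `keras.layers.Embedding` objects directly, padding or slicing
# the LM-head bias and copying the old embedding rows into a freshly initialized embedding matrix.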
self.set_output_embeddings(new_lm_head_decoder) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _v2_resize_token_embeddings(self, new_num_tokens): old_embeddings = self.get_input_embeddings() new_embeddings = self._v2_get_resized_embeddings(old_embeddings, new_num_tokens) self.set_input_embeddings(new_embeddings) if self.get_bias() is not None: old_lm_head_bias = self.get_bias() new_lm_head_bias = self._v2_get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) tied_weights = self.get_input_embeddings() == self.get_output_embeddings() if self.get_output_embeddings() is not None and (not tied_weights): old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) self.set_output_embeddings(new_lm_head_decoder) return self.get_input_embeddings() def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): new_lm_head_bias = {} for (attr, weight) in old_lm_head_bias.items(): (first_dim, old_num_tokens) = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) size_diff = new_num_tokens - old_num_tokens final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens] if tf.math.greater(size_diff, 0): padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy] bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True) bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False) else: slice_from = [0] if first_dim is None else [0, 0] current_bias = tf.slice(weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)) bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True) new_bias = self.add_weight(shape=final_shape, initializer='zeros', trainable=True, name=weight.name.split(':')[0]) init_bias = tf.where(bias_mask, current_bias, new_bias.value()) new_bias.assign(init_bias) new_lm_head_bias[attr] = new_bias return new_lm_head_bias def _v2_get_resized_lm_head_bias(self, old_lm_head_bias: Dict[str, tf.Variable], new_num_tokens: int) -> Dict[str, tf.Tensor]: new_lm_head_bias = {} for (attr, weight) in old_lm_head_bias.items(): (first_dim, old_num_tokens) = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) size_diff = new_num_tokens - old_num_tokens if old_num_tokens > new_num_tokens: new_bias = weight.value()[..., :new_num_tokens] else: padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] new_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape)) new_lm_head_bias[attr] = new_bias return new_lm_head_bias def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens): new_lm_head_decoder = old_lm_head_decoder is_input_output_equals = tf.reduce_any(self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder) if old_lm_head_decoder is not None and (not is_input_output_equals): old_embedding_dim = shape_list(old_lm_head_decoder)[1] (decoder_mask, current_decoder) = init_copy_embeddings(old_lm_head_decoder, new_num_tokens) new_lm_head_decoder = self.add_weight(shape=(new_num_tokens, old_embedding_dim), initializer='zeros', trainable=True, 
name=old_lm_head_decoder.name.split(':')[0]) init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value()) new_lm_head_decoder.assign(init_decoder) return new_lm_head_decoder def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable: old_embedding_dim = shape_list(old_embeddings)[1] init_range = getattr(self.config, 'initializer_range', 0.02) (embeddings_mask, current_embeddings) = init_copy_embeddings(old_embeddings, new_num_tokens) new_embeddings = self.add_weight(name=old_embeddings.name.split(':')[0], shape=[new_num_tokens, old_embedding_dim], initializer=get_initializer(init_range), dtype=tf.float32) init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value()) new_embeddings.assign(init_embeddings) return new_embeddings def _v2_get_resized_embeddings(self, old_embeddings: keras.layers.Embedding, new_num_tokens: int) -> keras.layers.Embedding: init_range = 0.02 potential_initialization_variable_names = ['initializer_range', 'initializer_factor', 'init_std'] for var_name in potential_initialization_variable_names: if hasattr(self.config, var_name): init_range = getattr(self.config, var_name) new_embeddings = keras.layers.Embedding(input_dim=new_num_tokens, output_dim=old_embeddings.output_dim, embeddings_initializer=keras.initializers.TruncatedNormal(stddev=init_range), name=old_embeddings.embeddings.name[:-13]) new_embeddings(tf.constant([[0]])) if old_embeddings.input_dim >= new_num_tokens: init_embeddings = old_embeddings.embeddings[:new_num_tokens] else: init_embeddings = tf.concat([old_embeddings.embeddings, new_embeddings.embeddings[old_embeddings.input_dim:]], axis=0) new_embeddings.embeddings.assign(init_embeddings) return new_embeddings def prune_heads(self, heads_to_prune): raise NotImplementedError def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, signatures=None, max_shard_size: Union[int, str]='5GB', create_pr: bool=False, safe_serialization: bool=False, token: Optional[Union[str, bool]]=None, **kwargs): use_auth_token = kwargs.pop('use_auth_token', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.') token = use_auth_token if token is not None: kwargs['token'] = token if os.path.isfile(save_directory): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop('commit_message', None) repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) if saved_model: if getattr(self.config, 'torch_dtype', None) is not None and (not isinstance(self.config.torch_dtype, str)): self.config.torch_dtype = str(self.config.torch_dtype).split('.')[1] if signatures is None: serving_default = self.serving.get_concrete_function(self.input_signature) if any((spec.dtype == tf.int32 for spec in self.input_signature.values())): int64_spec = {key: tf.TensorSpec(shape=spec.shape, dtype=tf.int64 if spec.dtype == tf.int32 else spec.dtype, name=spec.name) for (key, spec) in self.input_signature.items()} int64_serving = self.serving.get_concrete_function(int64_spec) signatures = {'serving_default': serving_default, 'int64_serving': int64_serving} else: signatures = serving_default saved_model_dir = os.path.join(save_directory, 'saved_model', str(version)) self.save(saved_model_dir, include_optimizer=False, signatures=signatures) logger.info(f'Saved model created in {saved_model_dir}') self.config.architectures = [self.__class__.__name__[2:]] if self._auto_class is not None: custom_object_save(self, save_directory, config=self.config) self.config.save_pretrained(save_directory) if self.can_generate(): self.generation_config.save_pretrained(save_directory) weights_name = SAFE_WEIGHTS_NAME if safe_serialization else TF2_WEIGHTS_NAME output_model_file = os.path.join(save_directory, weights_name) (shards, index) = tf_shard_checkpoint(self.weights, max_shard_size, weights_name=weights_name) for filename in os.listdir(save_directory): full_filename = os.path.join(save_directory, filename) weights_no_suffix = weights_name.replace('.bin', '').replace('.safetensors', '') if filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and (filename not in shards.keys()): os.remove(full_filename) if index is None: if safe_serialization: state_dict = {strip_model_name_and_prefix(w.name): w.value() for w in self.weights} safe_save_file(state_dict, output_model_file, metadata={'format': 'tf'}) else: self.save_weights(output_model_file) logger.info(f'Model weights saved in {output_model_file}') else: save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else TF2_WEIGHTS_INDEX_NAME save_index_file = os.path.join(save_directory, save_index_file) with open(save_index_file, 'w', encoding='utf-8') as index_file: content = json.dumps(index, indent=2, sort_keys=True) + '\n' index_file.write(content) logger.info(f'The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be split in {len(shards)} checkpoint shards. 
You can find where each parameter has been saved in the index located at {save_index_file}.') for (shard_file, shard) in shards.items(): if safe_serialization: shard_state_dict = {strip_model_name_and_prefix(w.name): w.value() for w in shard} safe_save_file(shard_state_dict, os.path.join(save_directory, shard_file), metadata={'format': 'tf'}) else: with h5py.File(os.path.join(save_directory, shard_file), mode='w') as shard_file: layers = [] for layer in sorted(shard, key=lambda x: x.name): if 'model.' in layer.name or len(layer.name.split('/')) == 1: layer_name = layer.name else: layer_name = '/'.join(layer.name.split('/')[1:]) param_dset = shard_file.create_dataset(layer_name, layer.numpy().shape, dtype=layer.numpy().dtype) param_dset[:] = layer.numpy() layers.append(layer_name.encode('utf8')) save_attributes_to_hdf5_group(shard_file, 'layer_names', layers) if push_to_hub: self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, config: Optional[Union[PretrainedConfig, str, os.PathLike]]=None, cache_dir: Optional[Union[str, os.PathLike]]=None, ignore_mismatched_sizes: bool=False, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str='main', use_safetensors: bool=None, **kwargs): from_pt = kwargs.pop('from_pt', False) resume_download = kwargs.pop('resume_download', None) proxies = kwargs.pop('proxies', None) output_loading_info = kwargs.pop('output_loading_info', False) use_auth_token = kwargs.pop('use_auth_token', None) trust_remote_code = kwargs.pop('trust_remote_code', None) _ = kwargs.pop('mirror', None) load_weight_prefix = kwargs.pop('load_weight_prefix', None) from_pipeline = kwargs.pop('_from_pipeline', None) from_auto_class = kwargs.pop('_from_auto', False) subfolder = kwargs.pop('subfolder', '') commit_hash = kwargs.pop('_commit_hash', None) tf_to_pt_weight_rename = kwargs.pop('tf_to_pt_weight_rename', None) _ = kwargs.pop('adapter_kwargs', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') token = use_auth_token if trust_remote_code is True: logger.warning('The argument `trust_remote_code` is to be used with Auto classes. 
It has no effect here and is ignored.') user_agent = {'file_type': 'model', 'framework': 'tensorflow', 'from_auto_class': from_auto_class} if from_pipeline is not None: user_agent['using_pipeline'] = from_pipeline if is_offline_mode() and (not local_files_only): logger.info('Offline mode: forcing local_files_only=True') local_files_only = True if use_safetensors is None and (not is_safetensors_available()): use_safetensors = False if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path (config, model_kwargs) = cls.config_class.from_pretrained(config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, _from_auto=from_auto_class, _from_pipeline=from_pipeline, _commit_hash=commit_hash, **kwargs) else: model_kwargs = kwargs if commit_hash is None: commit_hash = getattr(config, '_commit_hash', None) is_sharded = False if pretrained_model_name_or_path is not None: pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if is_local: if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME) is_sharded = True elif use_safetensors is not False and os.path.isfile(os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME) elif use_safetensors is not False and os.path.isfile(os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME) is_sharded = True elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME) is_sharded = True elif use_safetensors: raise EnvironmentError(f'Error no file named {SAFE_WEIGHTS_NAME} or {SAFE_WEIGHTS_INDEX_NAME} found in directory {pretrained_model_name_or_path}. Please make sure that the model has been saved with `safe_serialization=True` or do not set `use_safetensors=True`.') elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)) or os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)): raise EnvironmentError(f'Error no file named {TF2_WEIGHTS_NAME} or {SAFE_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} but there is a file for PyTorch weights. 
Use `from_pt=True` to load this model from those weights.') else: raise EnvironmentError(f'Error no file named {TF2_WEIGHTS_NAME}, {SAFE_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory {pretrained_model_name_or_path}.') elif os.path.isfile(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path is_local = True elif os.path.isfile(pretrained_model_name_or_path + '.index'): archive_file = pretrained_model_name_or_path + '.index' is_local = True elif is_remote_url(pretrained_model_name_or_path): filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: if from_pt: filename = WEIGHTS_NAME elif use_safetensors is not False: filename = SAFE_WEIGHTS_NAME else: filename = TF2_WEIGHTS_NAME try: cached_file_kwargs = {'cache_dir': cache_dir, 'force_download': force_download, 'proxies': proxies, 'resume_download': resume_download, 'local_files_only': local_files_only, 'token': token, 'user_agent': user_agent, 'revision': revision, 'subfolder': subfolder, '_raise_exceptions_for_gated_repo': False, '_raise_exceptions_for_missing_entries': False, '_commit_hash': commit_hash} resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) if resolved_archive_file is None and filename == SAFE_WEIGHTS_NAME: filename = TF2_WEIGHTS_NAME resolved_archive_file = cached_file(pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **cached_file_kwargs) if resolved_archive_file is None and filename == TF2_WEIGHTS_NAME: resolved_archive_file = cached_file(pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME, **cached_file_kwargs) if resolved_archive_file is not None: is_sharded = True if resolved_archive_file is None and filename == WEIGHTS_NAME: resolved_archive_file = cached_file(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs) if resolved_archive_file is not None: is_sharded = True if resolved_archive_file is None: has_file_kwargs = {'revision': revision, 'proxies': proxies, 'token': token, 'cache_dir': cache_dir, 'local_files_only': local_files_only} if has_file(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **has_file_kwargs): is_sharded = True elif has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {TF2_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those weights.') else: raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}') except EnvironmentError: raise except Exception: raise EnvironmentError(f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME} or {TF_WEIGHTS_NAME}") if is_local: logger.info(f'loading weights file {archive_file}') resolved_archive_file = archive_file filename = resolved_archive_file.split(os.path.sep)[-1] else: logger.info(f'loading weights file {filename} from cache at {resolved_archive_file}') else: resolved_archive_file = None if is_sharded: (resolved_archive_file, sharded_metadata) = get_checkpoint_shard_files(pretrained_model_name_or_path, resolved_archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, _commit_hash=commit_hash) safetensors_from_pt = False if filename == SAFE_WEIGHTS_NAME: with safe_open(resolved_archive_file, framework='tf') as f: safetensors_metadata = f.metadata() if safetensors_metadata is None or safetensors_metadata.get('format') not in ['pt', 'tf', 'flax', 'mlx']: raise OSError(f'The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata. Make sure you save your model with the `save_pretrained` method.') safetensors_from_pt = safetensors_metadata.get('format') == 'pt' elif filename == SAFE_WEIGHTS_INDEX_NAME: with safe_open(resolved_archive_file[0], framework='tf') as f: safetensors_metadata = f.metadata() if safetensors_metadata is None or safetensors_metadata.get('format') not in ['pt', 'tf', 'flax', 'mlx']: raise OSError(f'The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata. Make sure you save your model with the `save_pretrained` method.') safetensors_from_pt = safetensors_metadata.get('format') == 'pt' config.name_or_path = pretrained_model_name_or_path if cls._requires_load_weight_prefix and model_kwargs.get('name') is not None: model_kwargs['load_weight_prefix'] = load_weight_prefix + '/' + model_kwargs.get('name') model = cls(config, *model_args, **model_kwargs) if tf_to_pt_weight_rename is None and hasattr(model, 'tf_to_pt_weight_rename'): tf_to_pt_weight_rename = model.tf_to_pt_weight_rename if from_pt: from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True, output_loading_info=output_loading_info, _prefix=load_weight_prefix, tf_to_pt_weight_rename=tf_to_pt_weight_rename) if load_weight_prefix is not None: with tf.compat.v1.variable_scope(load_weight_prefix): model.build_in_name_scope() else: model.build_in_name_scope() if safetensors_from_pt and (not is_sharded): from .modeling_tf_pytorch_utils import load_pytorch_state_dict_in_tf2_model with safe_open(resolved_archive_file, framework='tf') as safetensors_archive: return load_pytorch_state_dict_in_tf2_model(model, safetensors_archive, tf_inputs=False, allow_missing_keys=True, output_loading_info=output_loading_info, _prefix=load_weight_prefix, ignore_mismatched_sizes=ignore_mismatched_sizes, tf_to_pt_weight_rename=tf_to_pt_weight_rename) elif safetensors_from_pt: from .modeling_tf_pytorch_utils import load_sharded_pytorch_safetensors_in_tf2_model return load_sharded_pytorch_safetensors_in_tf2_model(model, resolved_archive_file, tf_inputs=False, allow_missing_keys=True, output_loading_info=output_loading_info, _prefix=load_weight_prefix, ignore_mismatched_sizes=ignore_mismatched_sizes, tf_to_pt_weight_rename=tf_to_pt_weight_rename) try: if 
is_sharded: for file in resolved_archive_file: assert os.path.isfile(file), f'Error retrieving files {file}' if filename == SAFE_WEIGHTS_INDEX_NAME: (missing_keys, unexpected_keys, mismatched_keys) = load_tf_sharded_weights_from_safetensors(model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix) else: (missing_keys, unexpected_keys, mismatched_keys) = load_tf_sharded_weights(model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix) else: (missing_keys, unexpected_keys, mismatched_keys) = load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=load_weight_prefix) except OSError as e: try: with open(resolved_archive_file) as f: if f.read().startswith('version'): raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.') else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise OSError('Unable to load weights from h5 file. If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. ') if cls._keys_to_ignore_on_load_missing is not None: for pat in cls._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning(f'Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).') else: logger.warning(f'All model checkpoint layers were used when initializing {model.__class__.__name__}.\n') if len(missing_keys) > 0: logger.warning(f'Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') elif len(mismatched_keys) == 0: logger.warning(f'All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use {model.__class__.__name__} for predictions without further training.') if len(mismatched_keys) > 0: mismatched_warning = '\n'.join([f'- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated' for (key, shape1, shape2) in mismatched_keys]) logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized because the shapes did not match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') if model.can_generate(): try: model.generation_config = GenerationConfig.from_pretrained(pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs) except OSError: logger.info('Generation config file not found, using a generation config created from the model config.') pass if output_loading_info: loading_info = {'missing_keys': missing_keys, 'unexpected_keys': unexpected_keys, 'mismatched_keys': mismatched_keys} return (model, loading_info) return model def push_to_hub(self, repo_id: str, use_temp_dir: Optional[bool]=None, commit_message: Optional[str]=None, private: Optional[bool]=None, max_shard_size: Optional[Union[int, str]]='10GB', token: Optional[Union[bool, str]]=None, use_auth_token: Optional[Union[bool, str]]=None, create_pr: bool=False, **base_model_card_args) -> str: if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') token = use_auth_token if 'repo_path_or_name' in base_model_card_args: warnings.warn('The `repo_path_or_name` argument is deprecated and will be removed in v5 of Transformers. 
Use `repo_id` instead.') repo_id = base_model_card_args.pop('repo_path_or_name') repo_url = base_model_card_args.pop('repo_url', None) organization = base_model_card_args.pop('organization', None) if os.path.isdir(repo_id): working_dir = repo_id repo_id = repo_id.split(os.path.sep)[-1] else: working_dir = repo_id.split('/')[-1] repo_id = self._create_repo(repo_id, private=private, token=token, repo_url=repo_url, organization=organization) if use_temp_dir is None: use_temp_dir = not os.path.isdir(working_dir) with working_or_temp_dir(working_dir=working_dir, use_temp_dir=use_temp_dir) as work_dir: files_timestamps = self._get_files_timestamps(work_dir) self.save_pretrained(work_dir, max_shard_size=max_shard_size) if hasattr(self, 'history') and hasattr(self, 'create_model_card'): base_model_card_args = {'output_dir': work_dir, 'model_name': Path(repo_id).name} base_model_card_args.update(base_model_card_args) self.create_model_card(**base_model_card_args) self._upload_modified_files(work_dir, repo_id, files_timestamps, commit_message=commit_message, token=token, create_pr=create_pr) @classmethod def register_for_auto_class(cls, auto_class='TFAutoModel'): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f'{auto_class} is not a valid auto class.') cls._auto_class = auto_class class TFConv1D(keras.layers.Layer): def __init__(self, nf, nx, initializer_range=0.02, **kwargs): super().__init__(**kwargs) self.nf = nf self.nx = nx self.initializer_range = initializer_range def build(self, input_shape): if self.built: return self.built = True self.weight = self.add_weight('weight', shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)) self.bias = self.add_weight('bias', shape=[1, self.nf], initializer=tf.zeros_initializer()) def call(self, x): (bz, sl) = shape_list(x)[:2] x = tf.reshape(x, [-1, self.nx]) x = tf.matmul(x, self.weight) + self.bias x = tf.reshape(x, [bz, sl, self.nf]) return x class TFSharedEmbeddings(keras.layers.Layer): def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float]=None, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.initializer_range = hidden_size ** (-0.5) if initializer_range is None else initializer_range warnings.warn('`TFSharedEmbeddings` is scheduled for deletion in v4.32, use `keras.layers.Embedding` instead.', DeprecationWarning) def build(self, input_shape): self.weight = self.add_weight('weight', shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)) super().build(input_shape) def get_config(self): config = {'vocab_size': self.vocab_size, 'hidden_size': self.hidden_size, 'initializer_range': self.initializer_range} base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs: tf.Tensor, mode: str='embedding') -> tf.Tensor: if mode == 'embedding': return self._embedding(inputs) elif mode == 'linear': return self._linear(inputs) else: raise ValueError(f'mode {mode} is not valid.') def _embedding(self, input_ids): return tf.gather(self.weight, input_ids) def _linear(self, inputs): first_dims = shape_list(inputs)[:-1] x = tf.reshape(inputs, [-1, self.hidden_size]) logits = tf.matmul(x, self.weight, transpose_b=True) return tf.reshape(logits, first_dims + [self.vocab_size]) class TFSequenceSummary(keras.layers.Layer): def __init__(self, 
config: PretrainedConfig, initializer_range: float=0.02, **kwargs): super().__init__(**kwargs) self.summary_type = config.summary_type if hasattr(config, 'summary_use_proj') else 'last' if self.summary_type == 'attn': raise NotImplementedError self.has_summary = hasattr(config, 'summary_use_proj') and config.summary_use_proj if self.has_summary: if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and (config.num_labels > 0): num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = keras.layers.Dense(num_classes, kernel_initializer=get_initializer(initializer_range), name='summary') self.has_activation = False activation_string = getattr(config, 'summary_activation', None) if activation_string is not None: self.has_activation = True self.activation = get_tf_activation(activation_string) self.has_first_dropout = hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0 if self.has_first_dropout: self.first_dropout = keras.layers.Dropout(config.summary_first_dropout) self.has_last_dropout = hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0 if self.has_last_dropout: self.last_dropout = keras.layers.Dropout(config.summary_last_dropout) self.hidden_size = config.hidden_size def call(self, inputs, cls_index=None, training=False): if not isinstance(inputs, (dict, tuple, list)): hidden_states = inputs elif isinstance(inputs, (tuple, list)): hidden_states = inputs[0] cls_index = inputs[1] if len(inputs) > 1 else None assert len(inputs) <= 2, 'Too many inputs.' else: hidden_states = inputs.get('hidden_states') cls_index = inputs.get('cls_index', None) if self.summary_type == 'last': output = hidden_states[:, -1] elif self.summary_type == 'first': output = hidden_states[:, 0] elif self.summary_type == 'mean': output = tf.reduce_mean(hidden_states, axis=1) elif self.summary_type == 'cls_index': hidden_shape = shape_list(hidden_states) if cls_index is None: cls_index = tf.fill(hidden_shape[:-2], hidden_shape[-2] - 1) cls_shape = shape_list(cls_index) if len(cls_shape) <= len(hidden_shape) - 2: cls_index = tf.expand_dims(cls_index, axis=-1) output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2) output = tf.squeeze(output, axis=len(hidden_shape) - 2) elif self.summary_type == 'attn': raise NotImplementedError if self.has_first_dropout: output = self.first_dropout(output, training=training) if self.has_summary: output = self.summary(output) if self.has_activation: output = self.activation(output) if self.has_last_dropout: output = self.last_dropout(output, training=training) return output def build(self, input_shape): if self.built: return self.built = True if getattr(self, 'summary', None) is not None: with tf.name_scope('summary'): self.summary.build(self.hidden_size) def get_initializer(initializer_range: float=0.02) -> keras.initializers.TruncatedNormal: return keras.initializers.TruncatedNormal(stddev=initializer_range) # File: transformers-main/src/transformers/modeling_utils.py import collections import copy import functools import gc import importlib.metadata import inspect import itertools import json import os import re import shutil import tempfile import warnings from contextlib import contextmanager from dataclasses import dataclass from functools import partial, wraps from threading import Thread from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union from zipfile import is_zipfile import torch from huggingface_hub import 
split_torch_state_dict_into_shards from packaging import version from torch import Tensor, nn from torch.nn import CrossEntropyLoss, Identity from torch.utils.checkpoint import checkpoint from .activations import get_activation from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save from .generation import GenerationConfig, GenerationMixin from .integrations import PeftAdapterMixin, deepspeed_config, is_deepspeed_zero3_enabled from .pytorch_utils import Conv1D, apply_chunking_to_forward, find_pruneable_heads_and_indices, id_tensor_storage, is_torch_greater_or_equal_than_1_13, prune_conv1d_layer, prune_layer, prune_linear_layer from .quantizers import AutoHfQuantizer, HfQuantizer from .quantizers.quantizers_utils import get_module_from_name from .safetensors_conversion import auto_conversion from .utils import ACCELERATE_MIN_VERSION, ADAPTER_SAFE_WEIGHTS_NAME, ADAPTER_WEIGHTS_NAME, CONFIG_NAME, DUMMY_INPUTS, FLAX_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, ModelOutput, PushToHubMixin, cached_file, copy_func, download_url, extract_commit_hash, has_file, is_accelerate_available, is_bitsandbytes_available, is_flash_attn_2_available, is_offline_mode, is_optimum_available, is_peft_available, is_remote_url, is_safetensors_available, is_torch_sdpa_available, is_torch_xla_available, logging, replace_return_docstrings, strtobool from .utils.hub import convert_file_size_to_int, create_and_tag_model_card, get_checkpoint_shard_files from .utils.import_utils import ENV_VARS_TRUE_VALUES, is_sagemaker_mp_enabled, is_torch_fx_proxy, is_torchdynamo_compiling from .utils.quantization_config import BitsAndBytesConfig, QuantizationMethod XLA_USE_BF16 = os.environ.get('XLA_USE_BF16', '0').upper() XLA_DOWNCAST_BF16 = os.environ.get('XLA_DOWNCAST_BF16', '0').upper() if is_accelerate_available(): from accelerate import dispatch_model, infer_auto_device_map, init_empty_weights from accelerate.hooks import add_hook_to_module from accelerate.utils import check_tied_parameters_on_same_device, extract_model_from_parallel, find_tied_parameters, get_balanced_memory, get_max_memory, load_offloaded_weights, offload_weight, save_offload_index, set_module_tensor_to_device accelerate_version = version.parse(importlib.metadata.version('accelerate')) if accelerate_version >= version.parse('0.31'): from accelerate.utils.modeling import get_state_dict_from_offload if is_safetensors_available(): from safetensors import safe_open from safetensors.torch import load_file as safe_load_file from safetensors.torch import save_file as safe_save_file logger = logging.get_logger(__name__) _init_weights = True def is_fsdp_enabled(): return torch.distributed.is_available() and torch.distributed.is_initialized() and (strtobool(os.environ.get('ACCELERATE_USE_FSDP', 'False')) == 1) and (strtobool(os.environ.get('FSDP_CPU_RAM_EFFICIENT_LOADING', 'False')) == 1) def is_local_dist_rank_0(): return torch.distributed.is_available() and torch.distributed.is_initialized() and (int(os.environ.get('LOCAL_RANK', -1)) == 0) if is_sagemaker_mp_enabled(): import smdistributed.modelparallel.torch as smp from smdistributed.modelparallel import __version__ as SMP_VERSION IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse('1.10') else: IS_SAGEMAKER_MP_POST_1_10 = False if is_peft_available(): from .utils import find_adapter_config_file TORCH_INIT_FUNCTIONS = {'uniform_': nn.init.uniform_, 
'normal_': nn.init.normal_, 'trunc_normal_': nn.init.trunc_normal_, 'constant_': nn.init.constant_, 'xavier_uniform_': nn.init.xavier_uniform_, 'xavier_normal_': nn.init.xavier_normal_, 'kaiming_uniform_': nn.init.kaiming_uniform_, 'kaiming_normal_': nn.init.kaiming_normal_, 'uniform': nn.init.uniform, 'normal': nn.init.normal, 'xavier_uniform': nn.init.xavier_uniform, 'xavier_normal': nn.init.xavier_normal, 'kaiming_uniform': nn.init.kaiming_uniform, 'kaiming_normal': nn.init.kaiming_normal} @contextmanager def no_init_weights(_enable=True): global _init_weights old_init_weights = _init_weights if _enable: _init_weights = False def _skip_init(*args, **kwargs): pass for (name, init_func) in TORCH_INIT_FUNCTIONS.items(): setattr(torch.nn.init, name, _skip_init) try: yield finally: _init_weights = old_init_weights if _enable: for (name, init_func) in TORCH_INIT_FUNCTIONS.items(): setattr(torch.nn.init, name, init_func) def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, 'ModuleUtilsMixin']): try: return next(parameter.parameters()).device except StopIteration: def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for (k, v) in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device def get_first_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, 'ModuleUtilsMixin']): try: return next(parameter.parameters()).dtype except StopIteration: def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for (k, v) in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].dtype def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, 'ModuleUtilsMixin']): last_dtype = None for t in parameter.parameters(): last_dtype = t.dtype if t.is_floating_point(): if XLA_USE_BF16 in ENV_VARS_TRUE_VALUES and is_torch_xla_available(): return torch.bfloat16 if XLA_DOWNCAST_BF16 in ENV_VARS_TRUE_VALUES and is_torch_xla_available(): if t.dtype == torch.float: return torch.bfloat16 if t.dtype == torch.double: return torch.float32 return t.dtype if last_dtype is not None: return last_dtype def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for (k, v) in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = parameter._named_members(get_members_fn=find_tensor_attributes) last_tuple = None for tuple in gen: last_tuple = tuple if tuple[1].is_floating_point(): return tuple[1].dtype if last_tuple is not None: return last_tuple[1].dtype for t in parameter.buffers(): last_dtype = t.dtype if t.is_floating_point(): return t.dtype return last_dtype def get_state_dict_float_dtype(state_dict): for t in state_dict.values(): if t.is_floating_point(): return t.dtype raise ValueError("couldn't find any floating point dtypes in state_dict") def get_state_dict_dtype(state_dict): for t in state_dict.values(): if t.is_floating_point(): return t.dtype else: return next(iter(state_dict.values())).dtype def dtype_byte_size(dtype): if dtype == torch.bool: return 1 / 8 bit_search = re.search('[^\\d](\\d+)_?', str(dtype)) if bit_search is None: raise ValueError(f'`dtype` is not a valid dtype: {dtype}.') bit_size = int(bit_search.groups()[0]) return bit_size // 8 def check_support_param_buffer_assignment(model_to_load, state_dict, start_prefix=''): 
if len([key for key in state_dict if key.startswith(start_prefix)]) == 0: return False if is_deepspeed_zero3_enabled(): return False if not getattr(model_to_load, '_supports_param_buffer_assignment', True): logger.debug(f'{model_to_load.__class__.__name__} does not support param buffer assignment, loading will be slower') return False first_key = list(model_to_load.state_dict().keys())[0] if start_prefix + first_key in state_dict: return state_dict[start_prefix + first_key].dtype == model_to_load.state_dict()[first_key].dtype return False def shard_checkpoint(state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str]='10GB', weights_name: str=WEIGHTS_NAME): logger.warning('Note that `shard_checkpoint` is deprecated and will be removed in v4.44. We recommend you using split_torch_state_dict_into_shards from huggingface_hub library') max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [{}] last_block_size = 0 total_size = 0 storage_id_to_block = {} for (key, weight) in state_dict.items(): if isinstance(weight, str): continue else: storage_id = id_tensor_storage(weight) if storage_id in storage_id_to_block and weight.device != torch.device('meta'): block_id = storage_id_to_block[storage_id] sharded_state_dicts[block_id][key] = weight continue weight_size = weight.numel() * dtype_byte_size(weight.dtype) if last_block_size + weight_size > max_shard_size and len(sharded_state_dicts[-1]) > 0: sharded_state_dicts.append({}) last_block_size = 0 sharded_state_dicts[-1][key] = weight last_block_size += weight_size total_size += weight_size storage_id_to_block[storage_id] = len(sharded_state_dicts) - 1 if len(sharded_state_dicts) == 1: return ({weights_name: sharded_state_dicts[0]}, None) weight_map = {} shards = {} for (idx, shard) in enumerate(sharded_state_dicts): shard_file = weights_name.replace('.bin', f'-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin') shard_file = shard_file.replace('.safetensors', f'-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors') shards[shard_file] = shard for key in shard.keys(): weight_map[key] = shard_file metadata = {'total_size': total_size} index = {'metadata': metadata, 'weight_map': weight_map} return (shards, index) def load_sharded_checkpoint(model, folder, strict=True, prefer_safe=True): index_file = os.path.join(folder, WEIGHTS_INDEX_NAME) safe_index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME) index_present = os.path.isfile(index_file) safe_index_present = os.path.isfile(safe_index_file) if not index_present and (not (safe_index_present and is_safetensors_available())): filenames = (WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_INDEX_NAME) if is_safetensors_available() else (WEIGHTS_INDEX_NAME,) raise ValueError(f"Can't find a checkpoint index ({' or '.join(filenames)}) in {folder}.") load_safe = False if safe_index_present: if prefer_safe: if is_safetensors_available(): load_safe = True else: logger.warning(f'Cannot load sharded checkpoint at {folder} safely since safetensors is not installed!') elif not index_present: load_safe = True load_index = safe_index_file if load_safe else index_file with open(load_index, 'r', encoding='utf-8') as f: index = json.load(f) shard_files = list(set(index['weight_map'].values())) loaded_keys = index['weight_map'].keys() model_keys = model.state_dict().keys() missing_keys = [key for key in model_keys if key not in loaded_keys] unexpected_keys = [key for key in loaded_keys if key not in model_keys] if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0): 
error_message = f'Error(s) in loading state_dict for {model.__class__.__name__}' if len(missing_keys) > 0: str_missing_keys = ','.join([f'"{k}"' for k in missing_keys]) error_message += f'\nMissing key(s): {str_missing_keys}.' if len(unexpected_keys) > 0: str_unexpected_keys = ','.join([f'"{k}"' for k in unexpected_keys]) error_message += f'\nUnexpected key(s): {str_unexpected_keys}.' raise RuntimeError(error_message) weights_only_kwarg = {'weights_only': True} if is_torch_greater_or_equal_than_1_13 else {} loader = safe_load_file if load_safe else partial(torch.load, map_location='cpu', **weights_only_kwarg) for shard_file in shard_files: state_dict = loader(os.path.join(folder, shard_file)) model.load_state_dict(state_dict, strict=False) del state_dict gc.collect() return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys) def load_state_dict(checkpoint_file: Union[str, os.PathLike], is_quantized: bool=False): if checkpoint_file.endswith('.safetensors') and is_safetensors_available(): with safe_open(checkpoint_file, framework='pt') as f: metadata = f.metadata() if metadata.get('format') not in ['pt', 'tf', 'flax', 'mlx']: raise OSError(f'The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure you save your model with the `save_pretrained` method.') return safe_load_file(checkpoint_file) try: if (is_deepspeed_zero3_enabled() and torch.distributed.is_initialized() and (torch.distributed.get_rank() > 0) or (is_fsdp_enabled() and (not is_local_dist_rank_0()))) and (not is_quantized): map_location = 'meta' else: map_location = 'cpu' extra_args = {} if isinstance(checkpoint_file, str) and map_location != 'meta' and (version.parse(torch.__version__) >= version.parse('2.1.0')) and is_zipfile(checkpoint_file): extra_args = {'mmap': True} weights_only_kwarg = {'weights_only': True} if is_torch_greater_or_equal_than_1_13 else {} return torch.load(checkpoint_file, map_location=map_location, **weights_only_kwarg, **extra_args) except Exception as e: try: with open(checkpoint_file) as f: if f.read(7) == 'version': raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.') else: raise ValueError(f'Unable to locate the file {checkpoint_file} which is necessary to load this pretrained model. Make sure you have saved the model properly.') from e except (UnicodeDecodeError, ValueError): raise OSError(f"Unable to load weights from pytorch checkpoint file for '{checkpoint_file}' at '{checkpoint_file}'. 
If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True.") def set_initialized_submodules(model, state_dict_keys): not_initialized_submodules = {} for (module_name, module) in model.named_modules(): loaded_keys = {k.replace(f'{module_name}.', '') for k in state_dict_keys if k.startswith(f'{module_name}.')} if loaded_keys.issuperset(module.state_dict()): module._is_hf_initialized = True else: not_initialized_submodules[module_name] = module return not_initialized_submodules def _end_ptr(tensor: torch.Tensor) -> int: if tensor.nelement(): stop = tensor.view(-1)[-1].data_ptr() + tensor.element_size() else: stop = tensor.data_ptr() return stop def _get_tied_weight_keys(module: nn.Module, prefix=''): tied_weight_keys = [] if getattr(module, '_tied_weights_keys', None) is not None: names = [f'{prefix}.{k}' if prefix else k for k in module._tied_weights_keys] tied_weight_keys.extend(names) if getattr(module, '_dynamic_tied_weights_keys', None) is not None: names = [f'{prefix}.{k}' if prefix else k for k in module._dynamic_tied_weights_keys] tied_weight_keys.extend(names) for (name, submodule) in module.named_children(): local_prefix = f'{prefix}.{name}' if prefix else name tied_weight_keys.extend(_get_tied_weight_keys(submodule, prefix=local_prefix)) return tied_weight_keys def _find_disjoint(tensors: List[Set[str]], state_dict: Dict[str, torch.Tensor]) -> Tuple[List[Set[str]], List[str]]: filtered_tensors = [] for shared in tensors: if len(shared) < 2: filtered_tensors.append(shared) continue areas = [] for name in shared: tensor = state_dict[name] areas.append((tensor.data_ptr(), _end_ptr(tensor), name)) areas.sort() (_, last_stop, last_name) = areas[0] filtered_tensors.append({last_name}) for (start, stop, name) in areas[1:]: if start >= last_stop: filtered_tensors.append({name}) else: filtered_tensors[-1].add(name) last_stop = stop disjoint_tensors = [] shared_tensors = [] for tensors in filtered_tensors: if len(tensors) == 1: disjoint_tensors.append(tensors.pop()) else: shared_tensors.append(tensors) return (shared_tensors, disjoint_tensors) def _find_identical(tensors: List[Set[str]], state_dict: Dict[str, torch.Tensor]) -> Tuple[List[Set[str]], Set[str]]: shared_tensors = [] identical = [] for shared in tensors: if len(shared) < 2: continue areas = collections.defaultdict(set) for name in shared: tensor = state_dict[name] area = (tensor.device, tensor.data_ptr(), _end_ptr(tensor)) areas[area].add(name) if len(areas) == 1: identical.append(shared) else: shared_tensors.append(shared) return (shared_tensors, identical) def _load_state_dict_into_model(model_to_load, state_dict, start_prefix, assign_to_params_buffers=False): old_keys = [] new_keys = [] renamed_keys = {} renamed_gamma = {} renamed_beta = {} warning_msg = f'A pretrained model of type `{model_to_load.__class__.__name__}` ' for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') renamed_gamma[key] = new_key if not renamed_gamma else renamed_gamma if 'beta' in key: new_key = key.replace('beta', 'bias') renamed_beta[key] = new_key if not renamed_beta else renamed_beta if new_key: old_keys.append(key) new_keys.append(new_key) renamed_keys = {**renamed_gamma, **renamed_beta} if renamed_keys: warning_msg += 'contains parameters that have been renamed internally (a few are listed below but more are present in the model):\n' for (old_key, new_key) in renamed_keys.items(): warning_msg += f'* `{old_key}` -> `{new_key}`\n' warning_msg += 'If you are using a model 
from the Hub, consider submitting a PR to adjust these weights and help future users.' logger.info_once(warning_msg) for (old_key, new_key) in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata error_msgs = [] def load(module: nn.Module, state_dict, prefix='', assign_to_params_buffers=False): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) local_metadata['assign_to_params_buffers'] = assign_to_params_buffers args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) if len([key for key in state_dict if key.startswith(prefix)]) > 0: if is_deepspeed_zero3_enabled(): import deepspeed named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False)) params_to_gather = [named_parameters[k] for k in state_dict.keys() if k in named_parameters] if len(params_to_gather) > 0: with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0): if torch.distributed.get_rank() == 0: module._load_from_state_dict(*args) else: module._load_from_state_dict(*args) for (name, child) in module._modules.items(): if child is not None: load(child, state_dict, prefix + name + '.', assign_to_params_buffers) load(model_to_load, state_dict, prefix=start_prefix, assign_to_params_buffers=assign_to_params_buffers) del state_dict return error_msgs def find_submodule_and_param_name(model, long_key, start_prefix): if len(start_prefix) > 0 and long_key.startswith(start_prefix): long_key = '.'.join(long_key.split('.')[1:]) split_key = long_key.split('.') submodule = model while len(split_key) > 1: if hasattr(submodule, split_key[0]): submodule = getattr(submodule, split_key[0]) del split_key[0] else: submodule = None break if submodule == model: submodule = None return (submodule, split_key[0]) def _move_model_to_meta(model, loaded_state_dict_keys, start_prefix): for k in loaded_state_dict_keys: (submodule, param_name) = find_submodule_and_param_name(model, k, start_prefix) if submodule is not None: new_val = getattr(submodule, param_name) if isinstance(new_val, torch.nn.Parameter): new_val = torch.nn.Parameter(new_val.to('meta')) else: new_val = new_val.to('meta') setattr(submodule, param_name, new_val) def _load_state_dict_into_meta_model(model, state_dict, loaded_state_dict_keys, start_prefix, expected_keys, device_map=None, offload_folder=None, offload_index=None, state_dict_folder=None, state_dict_index=None, dtype=None, hf_quantizer=None, is_safetensors=False, keep_in_fp32_modules=None, unexpected_keys=None, pretrained_model_name_or_path=None): error_msgs = [] old_keys = [] new_keys = [] renamed_gamma = {} renamed_beta = {} is_quantized = hf_quantizer is not None warning_msg = f'This model {type(model)}' for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') renamed_gamma[key] = new_key if not renamed_gamma else renamed_gamma if 'beta' in key: new_key = key.replace('beta', 'bias') renamed_beta[key] = new_key if not renamed_beta else renamed_beta if new_key: old_keys.append(key) new_keys.append(new_key) renamed_keys = {**renamed_gamma, **renamed_beta} if renamed_keys: warning_msg += 'contains parameters that have been renamed internally (a few are listed below but more are present in the model):\n' for (old_key, new_key) in renamed_keys.items(): warning_msg += f'* `{old_key}` -> `{new_key}`\n' warning_msg += 'If you are using a model from the Hub, 
consider submitting a PR to adjust these weights and help future users.' logger.info_once(warning_msg) for (old_key, new_key) in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) is_torch_e4m3fn_available = hasattr(torch, 'float8_e4m3fn') for (param_name, param) in state_dict.items(): if param_name not in loaded_state_dict_keys or param_name not in expected_keys: continue if param_name.startswith(start_prefix): param_name = param_name[len(start_prefix):] module_name = param_name set_module_kwargs = {} is_param_float8_e4m3fn = is_torch_e4m3fn_available and param.dtype == torch.float8_e4m3fn if dtype is not None and torch.is_floating_point(param) and (not is_param_float8_e4m3fn): if keep_in_fp32_modules is not None and any((module_to_keep_in_fp32 in param_name.split('.') for module_to_keep_in_fp32 in keep_in_fp32_modules)) and (dtype == torch.float16): param = param.to(torch.float32) if 'dtype' in list(inspect.signature(set_module_tensor_to_device).parameters): set_module_kwargs['dtype'] = torch.float32 else: param = param.to(dtype) old_param = model splits = param_name.split('.') for split in splits: old_param = getattr(old_param, split) if old_param is None: break if old_param is not None: if dtype is None: param = param.to(old_param.dtype) if old_param.is_contiguous(): param = param.contiguous() set_module_kwargs['value'] = param if device_map is None: param_device = 'cpu' else: while len(module_name) > 0 and module_name not in device_map: module_name = '.'.join(module_name.split('.')[:-1]) if module_name == '' and '' not in device_map: raise ValueError(f"{param_name} doesn't have any device set.") param_device = device_map[module_name] if param_device == 'disk': if not is_safetensors: offload_index = offload_weight(param, param_name, offload_folder, offload_index) elif param_device == 'cpu' and state_dict_index is not None: state_dict_index = offload_weight(param, param_name, state_dict_folder, state_dict_index) elif not is_quantized or not hf_quantizer.requires_parameters_quantization or (not hf_quantizer.check_quantized_param(model, param, param_name, state_dict, param_device=param_device, device_map=device_map)): if is_fsdp_enabled(): param_device = 'cpu' if is_local_dist_rank_0() else 'meta' set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs) else: hf_quantizer.create_quantized_param(model, param, param_name, param_device, state_dict, unexpected_keys) if is_fsdp_enabled() or is_deepspeed_zero3_enabled(): (module, tensor_name) = get_module_from_name(model, param_name) value = getattr(module, tensor_name) param_to = 'cpu' if is_fsdp_enabled() and (not is_local_dist_rank_0()): param_to = 'meta' value = type(value)(value.data.to(param_to), **value.__dict__) setattr(module, tensor_name, value) return (error_msgs, offload_index, state_dict_index) def _add_variant(weights_name: str, variant: Optional[str]=None) -> str: if variant is not None: splits = weights_name.split('.') splits = splits[:-1] + [variant] + splits[-1:] weights_name = '.'.join(splits) return weights_name class ModuleUtilsMixin: @staticmethod def _hook_rss_memory_pre_forward(module, *args, **kwargs): try: import psutil except ImportError: raise ImportError('You need to install psutil (pip install psutil) to use memory tracing.') process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_pre_forward = mem.rss return None @staticmethod def _hook_rss_memory_post_forward(module, *args, **kwargs): try: import psutil except ImportError: raise 
ImportError('You need to install psutil (pip install psutil) to use memory tracing.') process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_post_forward = mem.rss mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, 'mem_rss_diff') else 0) return None def add_memory_hooks(self): for module in self.modules(): module.register_forward_pre_hook(self._hook_rss_memory_pre_forward) module.register_forward_hook(self._hook_rss_memory_post_forward) self.reset_memory_hooks_state() def reset_memory_hooks_state(self): for module in self.modules(): module.mem_rss_diff = 0 module.mem_rss_post_forward = 0 module.mem_rss_pre_forward = 0 @property def device(self) -> torch.device: return get_parameter_device(self) @property def dtype(self) -> torch.dtype: return get_parameter_dtype(self) def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min return encoder_extended_attention_mask @staticmethod def create_extended_attention_mask_for_decoder(input_shape, attention_mask, device=None): if device is not None: warnings.warn('The `device` argument is deprecated and will be removed in v5 of Transformers.', FutureWarning) else: device = attention_mask.device (batch_size, seq_length) = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] causal_mask = torch.cat([torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), causal_mask], axis=-1) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] return extended_attention_mask def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: torch.device=None, dtype: torch.float=None) -> Tensor: if dtype is None: dtype = self.dtype if not (attention_mask.dim() == 2 and self.config.is_decoder): if device is not None: warnings.warn('The `device` argument is deprecated and will be removed in v5 of Transformers.', FutureWarning) if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: if self.config.is_decoder: extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder(input_shape, attention_mask, device) else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})') extended_attention_mask = extended_attention_mask.to(dtype=dtype) extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min return extended_attention_mask def get_head_mask(self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool=False) -> Tensor: if head_mask is not None: head_mask = self._convert_head_mask_to_5d(head_mask, 
num_hidden_layers) if is_attention_chunked is True: head_mask = head_mask.unsqueeze(-1) else: head_mask = [None] * num_hidden_layers return head_mask def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) assert head_mask.dim() == 5, f'head_mask.dim != 5, instead {head_mask.dim()}' head_mask = head_mask.to(dtype=self.dtype) return head_mask def num_parameters(self, only_trainable: bool=False, exclude_embeddings: bool=False) -> int: if exclude_embeddings: embedding_param_names = [f'{name}.weight' for (name, module_type) in self.named_modules() if isinstance(module_type, nn.Embedding)] total_parameters = [parameter for (name, parameter) in self.named_parameters() if name not in embedding_param_names] else: total_parameters = list(self.parameters()) total_numel = [] is_loaded_in_4bit = getattr(self, 'is_loaded_in_4bit', False) if is_loaded_in_4bit: if is_bitsandbytes_available(): import bitsandbytes as bnb else: raise ValueError('bitsandbytes is not installed but it seems that the model has been loaded in 4bit precision, something went wrong make sure to install bitsandbytes with `pip install bitsandbytes`. You also need a GPU. ') for param in total_parameters: if param.requires_grad or not only_trainable: if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit): if hasattr(param, 'element_size'): num_bytes = param.element_size() elif hasattr(param, 'quant_storage'): num_bytes = param.quant_storage.itemsize else: num_bytes = 1 total_numel.append(param.numel() * 2 * num_bytes) else: total_numel.append(param.numel()) return sum(total_numel) def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int: if not hasattr(self, 'warnings_issued'): self.warnings_issued = {} if self.main_input_name in input_dict: return input_dict[self.main_input_name].numel() elif 'estimate_tokens' not in self.warnings_issued: logger.warning('Could not estimate the number of tokens of the input, floating-point operations will not be computed') self.warnings_issued['estimate_tokens'] = True return 0 def floating_point_ops(self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool=True) -> int: return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings) class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin, PeftAdapterMixin): config_class = None base_model_prefix = '' main_input_name = 'input_ids' model_tags = None _auto_class = None _no_split_modules = None _skip_keys_device_placement = None _keep_in_fp32_modules = None _keys_to_ignore_on_load_missing = None _keys_to_ignore_on_load_unexpected = None _keys_to_ignore_on_save = None _tied_weights_keys = None is_parallelizable = False supports_gradient_checkpointing = False _is_stateful = False _supports_flash_attn_2 = False _supports_sdpa = False _supports_cache_class = False _supports_static_cache = False _supports_quantized_cache = False @property def dummy_inputs(self) -> Dict[str, torch.Tensor]: return {'input_ids': torch.tensor(DUMMY_INPUTS)} @property def framework(self) -> str: return 'pt' def __init__(self, config: PretrainedConfig, *inputs, **kwargs): super().__init__() if not isinstance(config, PretrainedConfig): raise ValueError(f'Parameter config in 
`{self.__class__.__name__}(config)` should be an instance of class `PretrainedConfig`. To create a model from a pretrained model use `model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`') config = self._autoset_attn_implementation(config, torch_dtype=torch.get_default_dtype(), check_device_map=False) self.config = config self.name_or_path = config.name_or_path self.warnings_issued = {} self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None self._keep_in_fp32_modules = copy.copy(self.__class__._keep_in_fp32_modules) def post_init(self): self.init_weights() self._backward_compatibility_gradient_checkpointing() def dequantize(self): hf_quantizer = getattr(self, 'hf_quantizer', None) if hf_quantizer is None: raise ValueError('You need to first quantize your model in order to dequantize it') return hf_quantizer.dequantize(self) def _backward_compatibility_gradient_checkpointing(self): if self.supports_gradient_checkpointing and getattr(self.config, 'gradient_checkpointing', False): self.gradient_checkpointing_enable() delattr(self.config, 'gradient_checkpointing') def add_model_tags(self, tags: Union[List[str], str]) -> None: if isinstance(tags, str): tags = [tags] if self.model_tags is None: self.model_tags = [] for tag in tags: if tag not in self.model_tags: self.model_tags.append(tag) @classmethod def _from_config(cls, config, **kwargs): torch_dtype = kwargs.pop('torch_dtype', None) use_flash_attention_2 = kwargs.pop('use_flash_attention_2', False) dtype_orig = None if torch_dtype is not None: dtype_orig = cls._set_default_torch_dtype(torch_dtype) config = copy.deepcopy(config) if config._attn_implementation_internal is not None: attn_implementation = config._attn_implementation_internal else: attn_implementation = None config._attn_implementation = kwargs.pop('attn_implementation', attn_implementation) config = cls._autoset_attn_implementation(config, use_flash_attention_2=use_flash_attention_2, check_device_map=False, torch_dtype=torch_dtype) if is_deepspeed_zero3_enabled(): import deepspeed logger.info('Detected DeepSpeed ZeRO-3: activating zero.init() for this model') with deepspeed.zero.Init(config_dict_or_path=deepspeed_config()): model = cls(config, **kwargs) else: model = cls(config, **kwargs) if dtype_orig is not None: torch.set_default_dtype(dtype_orig) return model @classmethod def _autoset_attn_implementation(cls, config, use_flash_attention_2: bool=False, torch_dtype: Optional[torch.dtype]=None, device_map: Optional[Union[str, Dict[str, int]]]=None, check_device_map: bool=True): requested_attn_implementation = None if hasattr(config, '_attn_implementation_internal') and config._attn_implementation_internal is not None: if config._attn_implementation != 'flash_attention_2' and use_flash_attention_2: raise ValueError(f'Both attn_implementation="{config._attn_implementation}" and `use_flash_attention_2=True` were used when loading the model, which are not compatible. We recommend to just use `attn_implementation="flash_attention_2"` when loading the model.') if config._attn_implementation not in ['eager', 'sdpa', 'flash_attention_2']: message = f'Specified `attn_implementation="{config._attn_implementation}"` is not supported. 
The only possible arguments are `attn_implementation="eager"` (manual attention implementation)' if cls._supports_flash_attn_2: message += ', `"attn_implementation=flash_attention_2"` (implementation using flash attention 2)' if cls._supports_sdpa: message += ', `"attn_implementation=sdpa"` (implementation using torch.nn.functional.scaled_dot_product_attention)' raise ValueError(message + '.') requested_attn_implementation = config._attn_implementation_internal if use_flash_attention_2: logger.warning_once('The model was loaded with use_flash_attention_2=True, which is deprecated and may be removed in a future release. Please use `attn_implementation="flash_attention_2"` instead.') config._attn_implementation = 'flash_attention_2' if config._attn_implementation == 'flash_attention_2': cls._check_and_enable_flash_attn_2(config, torch_dtype=torch_dtype, device_map=device_map, hard_check_only=False, check_device_map=check_device_map) elif requested_attn_implementation in [None, 'sdpa'] and (not is_torch_xla_available()): config = cls._check_and_enable_sdpa(config, hard_check_only=False if requested_attn_implementation is None else True) if torch.version.hip is not None and config._attn_implementation == 'sdpa' and (torch.cuda.device_count() > 1): logger.warning_once('Using the `SDPA` attention implementation on multi-gpu setup with ROCM may lead to performance issues due to the FA backend. Disabling it to use alternative backends.') torch.backends.cuda.enable_flash_sdp(False) else: config._attn_implementation = 'eager' return config @classmethod def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype: if not dtype.is_floating_point: raise ValueError(f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype") logger.info(f'Instantiating {cls.__name__} model under default dtype {dtype}.') dtype_orig = torch.get_default_dtype() torch.set_default_dtype(dtype) return dtype_orig @property def base_model(self) -> nn.Module: return getattr(self, self.base_model_prefix, self) @classmethod def can_generate(cls) -> bool: if 'GenerationMixin' in str(cls.prepare_inputs_for_generation) and 'GenerationMixin' in str(cls.generate): return False return True @classmethod def _check_and_enable_flash_attn_2(cls, config, torch_dtype: Optional[torch.dtype]=None, device_map: Optional[Union[str, Dict[str, int]]]=None, check_device_map: bool=True, hard_check_only: bool=False) -> PretrainedConfig: if not cls._supports_flash_attn_2: raise ValueError(f'{cls.__name__} does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co/{config._name_or_path}/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new') if not is_flash_attn_2_available(): preface = 'FlashAttention2 has been toggled on, but it cannot be used due to the following error:' install_message = 'Please refer to the documentation of https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2 to install Flash Attention 2.' if importlib.util.find_spec('flash_attn') is None: raise ImportError(f'{preface} the package flash_attn seems to be not installed. {install_message}') flash_attention_version = version.parse(importlib.metadata.version('flash_attn')) if torch.version.cuda: if flash_attention_version < version.parse('2.1.0'): raise ImportError(f'{preface} you need flash_attn package version to be greater or equal than 2.1.0. 
Detected version {flash_attention_version}. {install_message}') else: raise ImportError(f'{preface} Flash Attention 2 is not available. {install_message}') elif torch.version.hip: if flash_attention_version < version.parse('2.0.4'): raise ImportError(f'{preface} you need flash_attn package version to be greater than or equal to 2.0.4. Make sure to have that version installed - detected version {flash_attention_version}. {install_message}') else: raise ImportError(f'{preface} Flash Attention 2 is not available. {install_message}') _is_bettertransformer = getattr(cls, 'use_bettertransformer', False) if _is_bettertransformer: raise ValueError('Flash Attention 2 and BetterTransformer API are not compatible. Please make sure to disable BetterTransformers by doing model.reverse_bettertransformer()') if torch_dtype is None: logger.warning_once('You are attempting to use Flash Attention 2.0 without specifying a torch dtype. This might lead to unexpected behaviour') elif torch_dtype is not None and torch_dtype not in [torch.float16, torch.bfloat16]: logger.warning_once(f"""Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but the current dtype in {cls.__name__} is {torch_dtype}. You should run training or inference using Automatic Mixed-Precision via the `with torch.autocast(device_type='torch_device'):` context manager, or load the model with the `torch_dtype` argument. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)`""") if check_device_map and device_map is None and (torch.empty(0).device.type != 'cuda'): if torch.cuda.is_available(): logger.warning_once("You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.") else: raise ValueError('You are attempting to use Flash Attention 2.0 with a model not initialized on GPU and with no GPU available. This is not supported yet. Please make sure to have access to a GPU and either initialise the model on a GPU by passing a device_map or initialising the model on CPU and then moving it to GPU.') elif check_device_map and device_map is not None and isinstance(device_map, dict) and ('cpu' in device_map.values() or 'disk' in device_map.values()): raise ValueError('You are attempting to use Flash Attention 2.0 with a model dispatched on CPU or disk. This is not supported. Please make sure to initialise the model on a GPU by passing a device_map that contains only GPU devices as values.') if not hard_check_only: config._attn_implementation = 'flash_attention_2' return config @classmethod def _check_and_enable_sdpa(cls, config, hard_check_only: bool=False) -> PretrainedConfig: if hard_check_only: if not cls._supports_sdpa: raise ValueError(f'{cls.__name__} does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation="eager"` meanwhile. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`') if not is_torch_sdpa_available(): raise ImportError('PyTorch SDPA requirements in Transformers are not met. 
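# --- Illustrative usage (not part of modeling_utils.py): picking the attention backend at load time,
# as the error messages above suggest. A minimal sketch; the "flash_attention_2" path assumes a CUDA
# GPU with the flash-attn package installed, and "openai/whisper-tiny" is simply the checkpoint those
# messages use as their example.
import torch
from transformers import AutoModel
sdpa_model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="sdpa")
fa2_model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)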
Please install torch>=2.1.1.') if not is_torch_sdpa_available() or not cls._supports_sdpa: return config _is_bettertransformer = getattr(cls, 'use_bettertransformer', False) if _is_bettertransformer: return config if not hard_check_only: config._attn_implementation = 'sdpa' return config def enable_input_require_grads(self): def make_inputs_require_grads(module, input, output): output.requires_grad_(True) self._require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) def disable_input_require_grads(self): self._require_grads_hook.remove() def get_input_embeddings(self) -> nn.Module: base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: return base_model.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value: nn.Module): base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: base_model.set_input_embeddings(value) else: raise NotImplementedError def get_output_embeddings(self) -> nn.Module: return None def _init_weights(self, module): pass def _initialize_weights(self, module): if getattr(module, '_is_hf_initialized', False): return self._init_weights(module) module._is_hf_initialized = True def tie_weights(self): if getattr(self.config, 'tie_word_embeddings', True): output_embeddings = self.get_output_embeddings() if output_embeddings is not None: self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) if getattr(self.config, 'is_encoder_decoder', False) and getattr(self.config, 'tie_encoder_decoder', False): if hasattr(self, self.base_model_prefix): self = getattr(self, self.base_model_prefix) tied_weights = self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix, 'encoder') self._dynamic_tied_weights_keys = tied_weights for module in self.modules(): if hasattr(module, '_tie_weights'): module._tie_weights() @staticmethod def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str, base_encoder_name: str): uninitialized_encoder_weights: List[str] = [] tied_weights: List[str] = [] if decoder.__class__ != encoder.__class__: logger.info(f'{decoder.__class__} and {encoder.__class__} are not equal. 
In this case make sure that all encoder weights are correctly initialized.') def tie_encoder_to_decoder_recursively(decoder_pointer: nn.Module, encoder_pointer: nn.Module, module_name: str, base_encoder_name: str, uninitialized_encoder_weights: List[str], depth=0, total_decoder_name='', total_encoder_name=''): assert isinstance(decoder_pointer, nn.Module) and isinstance(encoder_pointer, nn.Module), f'{decoder_pointer} and {encoder_pointer} have to be of type nn.Module' if hasattr(decoder_pointer, 'weight'): assert hasattr(encoder_pointer, 'weight') encoder_pointer.weight = decoder_pointer.weight tied_weights.append(f'{base_encoder_name}{total_encoder_name}.weight') if hasattr(decoder_pointer, 'bias'): assert hasattr(encoder_pointer, 'bias') tied_weights.append(f'{base_encoder_name}{total_encoder_name}.bias') encoder_pointer.bias = decoder_pointer.bias return encoder_modules = encoder_pointer._modules decoder_modules = decoder_pointer._modules if len(decoder_modules) > 0: assert len(encoder_modules) > 0, f'Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}' all_encoder_weights = {module_name + '/' + sub_name for sub_name in encoder_modules.keys()} encoder_layer_pos = 0 for (name, module) in decoder_modules.items(): if name.isdigit(): encoder_name = str(int(name) + encoder_layer_pos) decoder_name = name if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(encoder_modules) != len(decoder_modules): encoder_layer_pos -= 1 continue elif name not in encoder_modules: continue elif depth > 500: raise ValueError('Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model.') else: decoder_name = encoder_name = name tie_encoder_to_decoder_recursively(decoder_modules[decoder_name], encoder_modules[encoder_name], module_name + '/' + name, base_encoder_name, uninitialized_encoder_weights, depth=depth + 1, total_encoder_name=f'{total_encoder_name}.{encoder_name}', total_decoder_name=f'{total_decoder_name}.{decoder_name}') all_encoder_weights.remove(module_name + '/' + encoder_name) uninitialized_encoder_weights += list(all_encoder_weights) tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, base_encoder_name, uninitialized_encoder_weights) if len(uninitialized_encoder_weights) > 0: logger.warning(f'The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}') return tied_weights def _tie_or_clone_weights(self, output_embeddings, input_embeddings): if self.config.torchscript: output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) else: output_embeddings.weight = input_embeddings.weight if getattr(output_embeddings, 'bias', None) is not None: output_embeddings.bias.data = nn.functional.pad(output_embeddings.bias.data, (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]), 'constant', 0) if hasattr(output_embeddings, 'out_features') and hasattr(input_embeddings, 'num_embeddings'): output_embeddings.out_features = input_embeddings.num_embeddings def _get_no_split_modules(self, device_map: str): _no_split_modules = set() modules_to_check = [self] while len(modules_to_check) > 0: module = modules_to_check.pop(-1) if module.__class__.__name__ not in _no_split_modules: if isinstance(module, PreTrainedModel): if module._no_split_modules is None: raise ValueError(f"{module.__class__.__name__} does not support `device_map='{device_map}'`. 
To implement support, the model class needs to implement the `_no_split_modules` attribute.") else: _no_split_modules = _no_split_modules | set(module._no_split_modules) modules_to_check += list(module.children()) return list(_no_split_modules) def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of: Optional[int]=None) -> nn.Embedding: model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of) if new_num_tokens is None and pad_to_multiple_of is None: return model_embeds is_quantized = hasattr(self, 'hf_quantizer') and self.hf_quantizer is not None if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed with deepspeed.zero.GatheredParameters(model_embeds.weight, modifier_rank=None): vocab_size = model_embeds.weight.shape[0] else: vocab_size = model_embeds.weight.shape[0] self.config.get_text_config().vocab_size = vocab_size self.vocab_size = vocab_size self.tie_weights() return model_embeds def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None): old_embeddings = self.get_input_embeddings() new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of) if hasattr(old_embeddings, '_hf_hook'): hook = old_embeddings._hf_hook add_hook_to_module(new_embeddings, hook) old_embeddings_requires_grad = old_embeddings.weight.requires_grad new_embeddings.requires_grad_(old_embeddings_requires_grad) self.set_input_embeddings(new_embeddings) is_quantized = hasattr(self, 'hf_quantizer') and self.hf_quantizer is not None if pad_to_multiple_of is not None: if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed with deepspeed.zero.GatheredParameters(new_embeddings.weight, modifier_rank=None): new_num_tokens = new_embeddings.weight.shape[0] else: new_num_tokens = new_embeddings.weight.shape[0] if self.get_output_embeddings() is not None and (not self.config.tie_word_embeddings): old_lm_head = self.get_output_embeddings() if isinstance(old_lm_head, torch.nn.Embedding): new_lm_head = self._get_resized_embeddings(old_lm_head, new_num_tokens) else: new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens) if hasattr(old_lm_head, '_hf_hook'): hook = old_lm_head._hf_hook add_hook_to_module(new_lm_head, hook) old_lm_head_requires_grad = old_lm_head.weight.requires_grad new_lm_head.requires_grad_(old_lm_head_requires_grad) self.set_output_embeddings(new_lm_head) return self.get_input_embeddings() def _get_resized_embeddings(self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int]=None, pad_to_multiple_of: Optional[int]=None) -> nn.Embedding: if pad_to_multiple_of is not None: if not isinstance(pad_to_multiple_of, int): raise ValueError(f'Asking to pad the embedding matrix to a multiple of `{pad_to_multiple_of}`, which is not an integer. Please make sure to pass an integer') if new_num_tokens is None: new_num_tokens = old_embeddings.weight.shape[0] new_num_tokens = (new_num_tokens + pad_to_multiple_of - 1) // pad_to_multiple_of * pad_to_multiple_of else: logger.info(f'You are resizing the embedding layer without providing a `pad_to_multiple_of` parameter. This means that the new embedding dimension will be {new_num_tokens}. This might induce some performance reduction as *Tensor Cores* will not be available. 
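# --- Illustrative usage (not part of modeling_utils.py): resizing token embeddings after adding
# vocabulary, a minimal sketch. "gpt2" is a placeholder checkpoint; `pad_to_multiple_of=64` shows the
# padding option handled by `_get_resized_embeddings` above.
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer.add_tokens(["<my_new_token>"])  # grow the vocabulary
model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)  # pad rows so Tensor Cores stay usable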
For more details about this, or help on choosing the correct value for resizing, refer to this guide: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc') if new_num_tokens is None: return old_embeddings is_quantized = hasattr(self, 'hf_quantizer') and self.hf_quantizer is not None if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None): (old_num_tokens, old_embedding_dim) = old_embeddings.weight.size() else: (old_num_tokens, old_embedding_dim) = old_embeddings.weight.size() if old_num_tokens == new_num_tokens and (not is_deepspeed_zero3_enabled()): return old_embeddings if not isinstance(old_embeddings, nn.Embedding): raise TypeError(f'Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Embedding}.') new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim, device=old_embeddings.weight.device, dtype=old_embeddings.weight.dtype) self._init_weights(new_embeddings) n = min(old_num_tokens, new_num_tokens) if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed params = [old_embeddings.weight, new_embeddings.weight] with deepspeed.zero.GatheredParameters(params, modifier_rank=0): new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] else: new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :] if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed params = [old_embeddings.weight, new_embeddings.weight] with deepspeed.zero.GatheredParameters(params, modifier_rank=0): old_embeddings.weight = new_embeddings.weight old_embeddings.num_embeddings = new_embeddings.weight.data.shape[0] if old_embeddings.padding_idx is not None and new_num_tokens - 1 < old_embeddings.padding_idx: old_embeddings.padding_idx = None else: old_embeddings.weight.data = new_embeddings.weight.data old_embeddings.num_embeddings = new_embeddings.weight.data.shape[0] if old_embeddings.padding_idx is not None and new_num_tokens - 1 < old_embeddings.padding_idx: old_embeddings.padding_idx = None return old_embeddings def _get_resized_lm_head(self, old_lm_head: nn.Linear, new_num_tokens: Optional[int]=None, transposed: Optional[bool]=False) -> nn.Linear: if new_num_tokens is None: return old_lm_head is_quantized = hasattr(self, 'hf_quantizer') and self.hf_quantizer is not None if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None): (old_num_tokens, old_lm_head_dim) = old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() else: (old_num_tokens, old_lm_head_dim) = old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size() if old_num_tokens == new_num_tokens and (not is_deepspeed_zero3_enabled()): return old_lm_head if not isinstance(old_lm_head, nn.Linear): raise TypeError(f'Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. 
You should either use a different resize function or make sure that `old_lm_head` are an instance of {nn.Linear}.') new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim) has_new_lm_head_bias = old_lm_head.bias is not None new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias, device=old_lm_head.weight.device, dtype=old_lm_head.weight.dtype) self._init_weights(new_lm_head) num_tokens_to_copy = min(old_num_tokens, new_num_tokens) if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias] with deepspeed.zero.GatheredParameters(params, modifier_rank=0): self._copy_lm_head_original_to_resized(new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias) else: self._copy_lm_head_original_to_resized(new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias) return new_lm_head def _copy_lm_head_original_to_resized(self, new_lm_head, old_lm_head, num_tokens_to_copy, transposed, has_new_lm_head_bias): if not transposed: new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :] else: new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy] if has_new_lm_head_bias: new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy] def resize_position_embeddings(self, new_num_position_embeddings: int): raise NotImplementedError(f'`resize_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`') def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]: raise NotImplementedError(f'`get_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`') def init_weights(self): if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) if _init_weights: self.apply(self._initialize_weights) self.tie_weights() def prune_heads(self, heads_to_prune: Dict[int, List[int]]): for (layer, heads) in heads_to_prune.items(): union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) self.config.pruned_heads[layer] = list(union_heads) self.base_model._prune_heads(heads_to_prune) def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None): if not self.supports_gradient_checkpointing: raise ValueError(f'{self.__class__.__name__} does not support gradient checkpointing.') if gradient_checkpointing_kwargs is None: gradient_checkpointing_kwargs = {'use_reentrant': True} gradient_checkpointing_func = functools.partial(checkpoint, **gradient_checkpointing_kwargs) _is_using_old_format = 'value' in inspect.signature(self._set_gradient_checkpointing).parameters if not _is_using_old_format: self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func) else: self.apply(partial(self._set_gradient_checkpointing, value=True)) logger.warning('You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it).Please update to the new format on your modeling file. 
To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model.') if getattr(self, '_hf_peft_config_loaded', False): self.enable_input_require_grads() def _set_gradient_checkpointing(self, enable: bool=True, gradient_checkpointing_func: Callable=checkpoint): is_gradient_checkpointing_set = False if hasattr(self, 'gradient_checkpointing'): self._gradient_checkpointing_func = gradient_checkpointing_func self.gradient_checkpointing = enable is_gradient_checkpointing_set = True for module in self.modules(): if hasattr(module, 'gradient_checkpointing'): module._gradient_checkpointing_func = gradient_checkpointing_func module.gradient_checkpointing = enable is_gradient_checkpointing_set = True if not is_gradient_checkpointing_set: raise ValueError(f'{self.__class__.__name__} is not compatible with gradient checkpointing. Make sure all parts of the architecture support it by setting a boolean attribute `gradient_checkpointing` on the modules of the model that use checkpointing.') def gradient_checkpointing_disable(self): if self.supports_gradient_checkpointing: _is_using_old_format = 'value' in inspect.signature(self._set_gradient_checkpointing).parameters if not _is_using_old_format: self._set_gradient_checkpointing(enable=False) else: logger.warning('You are using an old version of the checkpointing format that is deprecated (We will also silently ignore `gradient_checkpointing_kwargs` in case you passed it). Please update to the new format on your modeling file. To use the new format, you need to completely remove the definition of the method `_set_gradient_checkpointing` in your model.') self.apply(partial(self._set_gradient_checkpointing, value=False)) if getattr(self, '_hf_peft_config_loaded', False): self.disable_input_require_grads() @property def is_gradient_checkpointing(self) -> bool: return any((hasattr(m, 'gradient_checkpointing') and m.gradient_checkpointing for m in self.modules())) def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool=True, state_dict: Optional[dict]=None, save_function: Callable=torch.save, push_to_hub: bool=False, max_shard_size: Union[int, str]='5GB', safe_serialization: bool=True, variant: Optional[str]=None, token: Optional[Union[str, bool]]=None, save_peft_format: bool=True, **kwargs): use_auth_token = kwargs.pop('use_auth_token', None) ignore_metadata_errors = kwargs.pop('ignore_metadata_errors', False) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') token = use_auth_token if token is not None: kwargs['token'] = token _hf_peft_config_loaded = getattr(self, '_hf_peft_config_loaded', False) hf_quantizer = getattr(self, 'hf_quantizer', None) quantization_serializable = hf_quantizer is not None and isinstance(hf_quantizer, HfQuantizer) and hf_quantizer.is_serializable if hf_quantizer is not None and (not _hf_peft_config_loaded) and (not quantization_serializable): raise ValueError(f'The model is quantized with {hf_quantizer.quantization_config.quant_method} and is not serializable - check out the warnings from the logger on the traceback to understand the reason why the quantized model is not serializable.') if 'save_config' in kwargs: warnings.warn('`save_config` is deprecated and will be removed in v5 of Transformers. 
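# --- Illustrative usage (not part of modeling_utils.py): toggling gradient checkpointing, a minimal
# sketch with "gpt2" as a placeholder checkpoint. The kwargs are forwarded to
# torch.utils.checkpoint.checkpoint by `gradient_checkpointing_enable` above.
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={"use_reentrant": False})
assert model.is_gradient_checkpointing
model.gradient_checkpointing_disable()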
Use `is_main_process` instead.') is_main_process = kwargs.pop('save_config') if safe_serialization and (not is_safetensors_available()): raise ImportError('`safe_serialization` requires the `safetensors library: `pip install safetensors`.') if os.path.isfile(save_directory): logger.error(f'Provided path ({save_directory}) should be a directory, not a file') return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop('commit_message', None) repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) model_to_save = unwrap_model(self) dtype = get_parameter_dtype(model_to_save) model_to_save.config.torch_dtype = str(dtype).split('.')[1] model_to_save.config.architectures = [model_to_save.__class__.__name__] if self._auto_class is not None: custom_object_save(self, save_directory, config=self.config) if is_main_process: if not _hf_peft_config_loaded: misplaced_generation_parameters = model_to_save.config._get_non_default_generation_parameters() if self.can_generate() and len(misplaced_generation_parameters) > 0: warnings.warn(f"Moving the following attributes in the config to the generation config: {misplaced_generation_parameters}. You are seeing this warning because you've set generation parameters in the model config, as opposed to in the generation config.", UserWarning) for (param_name, param_value) in misplaced_generation_parameters.items(): setattr(model_to_save.generation_config, param_name, param_value) setattr(model_to_save.config, param_name, None) model_to_save.config.save_pretrained(save_directory) if self.can_generate(): model_to_save.generation_config.save_pretrained(save_directory) if _hf_peft_config_loaded: logger.info('Detected adapters on the model, saving the model in the PEFT format, only adapter weights will be saved.') state_dict = model_to_save.get_adapter_state_dict() if save_peft_format: logger.info('To match the expected format of the PEFT library, all keys of the state dict of adapters will be pre-pended with `base_model.model`.') peft_state_dict = {} for (key, value) in state_dict.items(): peft_state_dict[f'base_model.model.{key}'] = value state_dict = peft_state_dict active_adapter = self.active_adapters() if len(active_adapter) > 1: raise ValueError('Multiple active adapters detected, saving multiple active adapters is not supported yet. You can save adapters separately one by one by iteratively calling `model.set_adapter(adapter_name)` then `model.save_pretrained(...)`') active_adapter = active_adapter[0] current_peft_config = self.peft_config[active_adapter] current_peft_config.save_pretrained(save_directory) module_map = {} if state_dict is None: if hasattr(self, 'hf_device_map') and len(set(self.hf_device_map.values())) > 1 and ('cpu' in self.hf_device_map.values() or 'disk' in self.hf_device_map.values()): warnings.warn('Attempting to save a model with offloaded modules. 
Ensure that unallocated cpu memory exceeds the `shard_size` (5GB default)') for (name, module) in model_to_save.named_modules(): if name == '': continue module_state_dict = module.state_dict() for key in module_state_dict: module_map[name + f'.{key}'] = module state_dict = model_to_save.state_dict() if IS_SAGEMAKER_MP_POST_1_10: for (smp_to_hf, _) in smp.state.module_manager.translate_functions: state_dict = smp_to_hf(state_dict) if self._keys_to_ignore_on_save is not None: for ignore_key in self._keys_to_ignore_on_save: if ignore_key in state_dict.keys(): del state_dict[ignore_key] if safe_serialization: ptrs = collections.defaultdict(list) for (name, tensor) in state_dict.items(): if isinstance(tensor, torch.Tensor): ptrs[id_tensor_storage(tensor)].append(name) else: ptrs[id(tensor)].append(name) if hasattr(self, 'hf_device_map'): tied_params = find_tied_parameters(self) if tied_params: tied_names = tied_params[0] shared_ptrs = {ptr: names for (ptr, names) in ptrs.items() if any((name in tied_names for name in names))} else: shared_ptrs = {} else: shared_ptrs = {ptr: names for (ptr, names) in ptrs.items() if len(names) > 1} _tied_weights_keys = _get_tied_weight_keys(self) error_names = [] to_delete_names = set() for names in shared_ptrs.values(): if _tied_weights_keys is not None: found = 0 for name in sorted(names): matches_pattern = any((re.search(pat, name) for pat in _tied_weights_keys)) if matches_pattern and name in state_dict: found += 1 if found < len(names): to_delete_names.add(name) (shared_names, disjoint_names) = _find_disjoint(shared_ptrs.values(), state_dict) for name in disjoint_names: state_dict[name] = state_dict[name].clone() (shared_names, identical_names) = _find_identical(shared_names, state_dict) for inames in identical_names: known = inames.intersection(to_delete_names) for name in known: del state_dict[name] unknown = inames.difference(to_delete_names) if len(unknown) > 1: error_names.append(unknown) if shared_names: error_names.append(set(shared_names)) if len(error_names) > 0: raise RuntimeError(f'The weights trying to be saved contained shared tensors {error_names} that are mismatching the transformers base configuration. 
Try saving using `safe_serialization=False` or remove this tensor sharing.') if not _hf_peft_config_loaded: weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME weights_name = _add_variant(weights_name, variant) else: weights_name = ADAPTER_SAFE_WEIGHTS_NAME if safe_serialization else ADAPTER_WEIGHTS_NAME filename_pattern = weights_name.replace('.bin', '{suffix}.bin').replace('.safetensors', '{suffix}.safetensors') state_dict_split = split_torch_state_dict_into_shards(state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size) index = None if state_dict_split.is_sharded: index = {'metadata': state_dict_split.metadata, 'weight_map': state_dict_split.tensor_to_filename} for filename in os.listdir(save_directory): full_filename = os.path.join(save_directory, filename) weights_no_suffix = weights_name.replace('.bin', '').replace('.safetensors', '') filename_no_suffix = filename.replace('.bin', '').replace('.safetensors', '') reg = re.compile('(.*?)-\\d{5}-of-\\d{5}') if filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and (filename not in state_dict_split.filename_to_tensors.keys()) and is_main_process and (reg.fullmatch(filename_no_suffix) is not None): os.remove(full_filename) filename_to_tensors = state_dict_split.filename_to_tensors.items() if module_map: filename_to_tensors = logging.tqdm(filename_to_tensors, desc='Saving checkpoint shards') for (shard_file, tensors) in filename_to_tensors: shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors} if module_map: if accelerate_version < version.parse('0.31'): raise ImportError(f'You need accelerate version to be greater or equal than 0.31 to save models with offloaded parameters. Detected version {accelerate_version}. Please upgrade accelerate with `pip install -U accelerate`') shard_state_dict = {name: '' for name in shard} for module_name in shard: module = module_map[module_name] shard_state_dict = get_state_dict_from_offload(module, module_name, shard_state_dict) shard = shard_state_dict del shard_state_dict gc.collect() if safe_serialization: safe_save_file(shard, os.path.join(save_directory, shard_file), metadata={'format': 'pt'}) else: save_function(shard, os.path.join(save_directory, shard_file)) if index is None: path_to_weights = os.path.join(save_directory, weights_name) logger.info(f'Model weights saved in {path_to_weights}') else: save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME save_index_file = os.path.join(save_directory, _add_variant(save_index_file, variant)) with open(save_index_file, 'w', encoding='utf-8') as f: content = json.dumps(index, indent=2, sort_keys=True) + '\n' f.write(content) logger.info(f'The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be split in {len(state_dict_split.filename_to_tensors)} checkpoint shards. 
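# --- Illustrative usage (not part of modeling_utils.py): saving a checkpoint in safetensors shards,
# a minimal sketch. The directory name and the 2GB shard size are arbitrary; the defaults in the
# signature above are safe_serialization=True and max_shard_size="5GB".
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("gpt2")  # placeholder checkpoint
model.save_pretrained("./my_checkpoint", safe_serialization=True, max_shard_size="2GB")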
You can find where each parameter has been saved in the index located at {save_index_file}.') if push_to_hub: model_card = create_and_tag_model_card(repo_id, self.model_tags, token=token, ignore_metadata_errors=ignore_metadata_errors) model_card.save(os.path.join(save_directory, 'README.md')) self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token) @wraps(PushToHubMixin.push_to_hub) def push_to_hub(self, *args, **kwargs): tags = self.model_tags if self.model_tags is not None else [] tags_kwargs = kwargs.get('tags', []) if isinstance(tags_kwargs, str): tags_kwargs = [tags_kwargs] for tag in tags_kwargs: if tag not in tags: tags.append(tag) if tags: kwargs['tags'] = tags return super().push_to_hub(*args, **kwargs) def get_memory_footprint(self, return_buffers=True): mem = sum([param.nelement() * param.element_size() for param in self.parameters()]) if return_buffers: mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()]) mem = mem + mem_bufs return mem @wraps(torch.nn.Module.cuda) def cuda(self, *args, **kwargs): if getattr(self, 'quantization_method', None) == QuantizationMethod.HQQ: raise ValueError('`.cuda` is not supported for HQQ-quantized models.') if getattr(self, 'quantization_method', None) == QuantizationMethod.BITS_AND_BYTES: if getattr(self, 'is_loaded_in_8bit', False): raise ValueError('Calling `cuda()` is not supported for `8-bit` quantized models. Please use the model as it is, since the model has already been set to the correct devices.') elif version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.43.2'): raise ValueError(f'Calling `cuda()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. The current device is `{self.device}`. If you intended to move the model, please install bitsandbytes >= 0.43.2.') else: return super().cuda(*args, **kwargs) @wraps(torch.nn.Module.to) def to(self, *args, **kwargs): dtype_present_in_args = 'dtype' in kwargs if not dtype_present_in_args: for arg in args: if isinstance(arg, torch.dtype): dtype_present_in_args = True break if getattr(self, 'quantization_method', None) == QuantizationMethod.HQQ: raise ValueError('`.to` is not supported for HQQ-quantized models.') if getattr(self, 'quantization_method', None) == QuantizationMethod.BITS_AND_BYTES: if dtype_present_in_args: raise ValueError('You cannot cast a bitsandbytes model in a new `dtype`. Make sure to load the model using `from_pretrained` using the desired `dtype` by passing the correct `torch_dtype` argument.') if getattr(self, 'is_loaded_in_8bit', False): raise ValueError('`.to` is not supported for `8-bit` bitsandbytes models. Please use the model as it is, since the model has already been set to the correct devices and cast to the correct `dtype`.') elif version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.43.2'): raise ValueError(f'Calling `to()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. The current device is `{self.device}`. If you intended to move the model, please install bitsandbytes >= 0.43.2.') elif getattr(self, 'quantization_method', None) == QuantizationMethod.GPTQ: if dtype_present_in_args: raise ValueError('You cannot cast a GPTQ model in a new `dtype`. 
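# --- Illustrative usage (not part of modeling_utils.py): the guards above reject `.to(dtype)`,
# `.half()` and `.cuda()` on quantized models, so dtype and device placement are chosen at load time
# instead. A minimal sketch assuming bitsandbytes and a CUDA GPU; "facebook/opt-350m" is a
# placeholder checkpoint.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",          # devices assigned here, not via .cuda()/.to() afterwards
    torch_dtype=torch.float16,  # dtype of the non-quantized modules fixed here, not via .half()
)
print(f"memory footprint: {model.get_memory_footprint() / 1e6:.1f} MB")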
Make sure to load the model using `from_pretrained` using the desired `dtype` by passing the correct `torch_dtype` argument.') return super().to(*args, **kwargs) def half(self, *args): if getattr(self, 'is_quantized', False): raise ValueError('`.half()` is not supported for quantized model. Please use the model as it is, since the model has already been casted to the correct `dtype`.') else: return super().half(*args) def float(self, *args): if getattr(self, 'is_quantized', False): raise ValueError('`.float()` is not supported for quantized model. Please use the model as it is, since the model has already been casted to the correct `dtype`.') else: return super().float(*args) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, config: Optional[Union[PretrainedConfig, str, os.PathLike]]=None, cache_dir: Optional[Union[str, os.PathLike]]=None, ignore_mismatched_sizes: bool=False, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str='main', use_safetensors: bool=None, **kwargs) -> 'PreTrainedModel': state_dict = kwargs.pop('state_dict', None) from_tf = kwargs.pop('from_tf', False) from_flax = kwargs.pop('from_flax', False) resume_download = kwargs.pop('resume_download', None) proxies = kwargs.pop('proxies', None) output_loading_info = kwargs.pop('output_loading_info', False) use_auth_token = kwargs.pop('use_auth_token', None) trust_remote_code = kwargs.pop('trust_remote_code', None) _ = kwargs.pop('mirror', None) from_pipeline = kwargs.pop('_from_pipeline', None) from_auto_class = kwargs.pop('_from_auto', False) _fast_init = kwargs.pop('_fast_init', True) torch_dtype = kwargs.pop('torch_dtype', None) low_cpu_mem_usage = kwargs.pop('low_cpu_mem_usage', None) device_map = kwargs.pop('device_map', None) max_memory = kwargs.pop('max_memory', None) offload_folder = kwargs.pop('offload_folder', None) offload_state_dict = kwargs.pop('offload_state_dict', False) offload_buffers = kwargs.pop('offload_buffers', False) load_in_8bit = kwargs.pop('load_in_8bit', False) load_in_4bit = kwargs.pop('load_in_4bit', False) quantization_config = kwargs.pop('quantization_config', None) subfolder = kwargs.pop('subfolder', '') commit_hash = kwargs.pop('_commit_hash', None) variant = kwargs.pop('variant', None) adapter_kwargs = kwargs.pop('adapter_kwargs', {}) adapter_name = kwargs.pop('adapter_name', 'default') use_flash_attention_2 = kwargs.pop('use_flash_attention_2', False) generation_config = kwargs.pop('generation_config', None) gguf_file = kwargs.pop('gguf_file', None) gguf_path = None if is_fsdp_enabled(): low_cpu_mem_usage = True if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') token = use_auth_token if token is not None and adapter_kwargs is not None and ('token' not in adapter_kwargs): adapter_kwargs['token'] = token if use_safetensors is None and (not is_safetensors_available()): use_safetensors = False if trust_remote_code is True: logger.warning('The argument `trust_remote_code` is to be used with Auto classes. 
It has no effect here and is ignored.') if gguf_file is not None and (not is_accelerate_available()): raise ValueError('accelerate is required when loading a GGUF file `pip install accelerate`.') if commit_hash is None: if not isinstance(config, PretrainedConfig): resolved_config_file = cached_file(pretrained_model_name_or_path, CONFIG_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False) commit_hash = extract_commit_hash(resolved_config_file, commit_hash) else: commit_hash = getattr(config, '_commit_hash', None) if is_peft_available(): _adapter_model_path = adapter_kwargs.pop('_adapter_model_path', None) if _adapter_model_path is None: _adapter_model_path = find_adapter_config_file(pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, _commit_hash=commit_hash, **adapter_kwargs) if _adapter_model_path is not None and os.path.isfile(_adapter_model_path): with open(_adapter_model_path, 'r', encoding='utf-8') as f: _adapter_model_path = pretrained_model_name_or_path pretrained_model_name_or_path = json.load(f)['base_model_name_or_path'] else: _adapter_model_path = None if isinstance(device_map, torch.device): device_map = {'': device_map} elif isinstance(device_map, str) and device_map not in ['auto', 'balanced', 'balanced_low_0', 'sequential']: try: device_map = {'': torch.device(device_map)} except RuntimeError: raise ValueError(f"When passing device_map as a string, the value needs to be a device name (e.g. cpu, cuda:0) or 'auto', 'balanced', 'balanced_low_0', 'sequential' but found {device_map}.") elif isinstance(device_map, int): if device_map < 0: raise ValueError("You can't pass device_map as a negative int. If you want to put the model on the cpu, pass device_map = 'cpu' ") else: device_map = {'': device_map} if device_map is not None: if low_cpu_mem_usage is None: low_cpu_mem_usage = True elif not low_cpu_mem_usage: raise ValueError('Passing along a `device_map` requires `low_cpu_mem_usage=True`') if low_cpu_mem_usage: if is_deepspeed_zero3_enabled(): raise ValueError('DeepSpeed Zero-3 is not compatible with `low_cpu_mem_usage=True` or with passing a `device_map`.') elif not is_accelerate_available(): raise ImportError(f"Using `low_cpu_mem_usage=True` or a `device_map` requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`") if load_in_4bit or load_in_8bit: if quantization_config is not None: raise ValueError("You can't pass `load_in_4bit`or `load_in_8bit` as a kwarg when passing `quantization_config` argument at the same time.") config_dict = {k: v for (k, v) in kwargs.items() if k in inspect.signature(BitsAndBytesConfig).parameters} config_dict = {**config_dict, 'load_in_4bit': load_in_4bit, 'load_in_8bit': load_in_8bit} (quantization_config, kwargs) = BitsAndBytesConfig.from_dict(config_dict=config_dict, return_unused_kwargs=True, **kwargs) logger.warning('The `load_in_4bit` and `load_in_8bit` arguments are deprecated and will be removed in the future versions. 
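# --- Illustrative usage (not part of modeling_utils.py): the `device_map` forms accepted by the
# parsing above. A minimal sketch; "gpt2" is a placeholder checkpoint, the CUDA variants assume a
# GPU, and passing any device_map implies low_cpu_mem_usage=True as enforced above.
from transformers import AutoModelForCausalLM
model_auto = AutoModelForCausalLM.from_pretrained("gpt2", device_map="auto")    # inferred placement
model_gpu = AutoModelForCausalLM.from_pretrained("gpt2", device_map="cuda:0")   # a single named device
model_idx = AutoModelForCausalLM.from_pretrained("gpt2", device_map=0)          # int -> {'': 0}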
Please, pass a `BitsAndBytesConfig` object in `quantization_config` argument instead.') from_pt = not from_tf | from_flax user_agent = {'file_type': 'model', 'framework': 'pytorch', 'from_auto_class': from_auto_class} if from_pipeline is not None: user_agent['using_pipeline'] = from_pipeline if is_offline_mode() and (not local_files_only): logger.info('Offline mode: forcing local_files_only=True') local_files_only = True if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path (config, model_kwargs) = cls.config_class.from_pretrained(config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs) else: config = copy.deepcopy(config) kwarg_attn_imp = kwargs.pop('attn_implementation', None) if kwarg_attn_imp is not None: config._attn_implementation = kwarg_attn_imp model_kwargs = kwargs pre_quantized = getattr(config, 'quantization_config', None) is not None if pre_quantized or quantization_config is not None: if pre_quantized: config.quantization_config = AutoHfQuantizer.merge_quantization_configs(config.quantization_config, quantization_config) else: config.quantization_config = quantization_config hf_quantizer = AutoHfQuantizer.from_config(config.quantization_config, pre_quantized=pre_quantized) else: hf_quantizer = None if hf_quantizer is not None: hf_quantizer.validate_environment(torch_dtype=torch_dtype, from_tf=from_tf, from_flax=from_flax, device_map=device_map) torch_dtype = hf_quantizer.update_torch_dtype(torch_dtype) device_map = hf_quantizer.update_device_map(device_map) user_agent['quant'] = hf_quantizer.quantization_config.quant_method.value if low_cpu_mem_usage is None: low_cpu_mem_usage = True logger.warning('`low_cpu_mem_usage` was None, now set to True since model is quantized.') is_quantized = hf_quantizer is not None is_sharded = False sharded_metadata = None loading_info = None keep_in_fp32_modules = None use_keep_in_fp32_modules = False if gguf_file is not None and hf_quantizer is not None: raise ValueError('You cannot combine Quantization and loading a model from a GGUF file, try again by making sure you did not passed a `quantization_config` or that you did not load a quantized model from the Hub.') if pretrained_model_name_or_path is not None and gguf_file is None: pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if is_local: if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + '.index')): archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + '.index') elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME) elif from_flax and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)): archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) elif use_safetensors is not False and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant))): archive_file = os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)) elif use_safetensors 
is not False and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant))): archive_file = os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)) is_sharded = True elif not use_safetensors and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant))): archive_file = os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)) elif not use_safetensors and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant))): archive_file = os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)) is_sharded = True elif not use_safetensors and (os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + '.index')) or os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME))): raise EnvironmentError(f'Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory {pretrained_model_name_or_path} but there is a file for TensorFlow weights. Use `from_tf=True` to load this model from those weights.') elif not use_safetensors and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)): raise EnvironmentError(f'Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory {pretrained_model_name_or_path} but there is a file for Flax weights. Use `from_flax=True` to load this model from those weights.') elif use_safetensors: raise EnvironmentError(f'Error no file named {_add_variant(SAFE_WEIGHTS_NAME, variant)} found in directory {pretrained_model_name_or_path}.') else: raise EnvironmentError(f"Error no file named {_add_variant(WEIGHTS_NAME, variant)}, {_add_variant(SAFE_WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME + '.index'} or {FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path}.") elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): archive_file = pretrained_model_name_or_path is_local = True elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path + '.index')): if not from_tf: raise ValueError(f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set from_tf to True to load from this checkpoint.") archive_file = os.path.join(subfolder, pretrained_model_name_or_path + '.index') is_local = True elif is_remote_url(pretrained_model_name_or_path): filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: if from_tf: filename = TF2_WEIGHTS_NAME elif from_flax: filename = FLAX_WEIGHTS_NAME elif use_safetensors is not False: filename = _add_variant(SAFE_WEIGHTS_NAME, variant) else: filename = _add_variant(WEIGHTS_NAME, variant) try: cached_file_kwargs = {'cache_dir': cache_dir, 'force_download': force_download, 'proxies': proxies, 'resume_download': resume_download, 'local_files_only': local_files_only, 'token': token, 'user_agent': user_agent, 'revision': revision, 'subfolder': subfolder, '_raise_exceptions_for_gated_repo': False, '_raise_exceptions_for_missing_entries': False, '_commit_hash': commit_hash} resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant): resolved_archive_file = 
cached_file(pretrained_model_name_or_path, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant), **cached_file_kwargs) if resolved_archive_file is not None: is_sharded = True elif use_safetensors: if revision == 'main': (resolved_archive_file, revision, is_sharded) = auto_conversion(pretrained_model_name_or_path, **cached_file_kwargs) cached_file_kwargs['revision'] = revision if resolved_archive_file is None: raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} and thus cannot be loaded with `safetensors`. Please make sure that the model has been saved with `safe_serialization=True` or do not set `use_safetensors=True`.') else: filename = _add_variant(WEIGHTS_NAME, variant) resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant): resolved_archive_file = cached_file(pretrained_model_name_or_path, _add_variant(WEIGHTS_INDEX_NAME, variant), **cached_file_kwargs) if resolved_archive_file is not None: is_sharded = True if not local_files_only and (not is_offline_mode()): if resolved_archive_file is not None: if filename in [WEIGHTS_NAME, WEIGHTS_INDEX_NAME]: safe_weights_name = SAFE_WEIGHTS_INDEX_NAME if is_sharded else SAFE_WEIGHTS_NAME has_file_kwargs = {'revision': revision, 'proxies': proxies, 'token': token, 'cache_dir': cache_dir, 'local_files_only': local_files_only} cached_file_kwargs = {'cache_dir': cache_dir, 'force_download': force_download, 'resume_download': resume_download, 'local_files_only': local_files_only, 'user_agent': user_agent, 'subfolder': subfolder, '_raise_exceptions_for_gated_repo': False, '_raise_exceptions_for_missing_entries': False, '_commit_hash': commit_hash, **has_file_kwargs} if not has_file(pretrained_model_name_or_path, safe_weights_name, **has_file_kwargs): Thread(target=auto_conversion, args=(pretrained_model_name_or_path,), kwargs={'ignore_errors_during_conversion': True, **cached_file_kwargs}, name='Thread-autoconversion').start() else: has_file_kwargs = {'revision': revision, 'proxies': proxies, 'token': token, 'cache_dir': cache_dir, 'local_files_only': local_files_only} if has_file(pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {_add_variant(WEIGHTS_NAME, variant)} but there is a file for TensorFlow weights. Use `from_tf=True` to load this model from those weights.') elif has_file(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {_add_variant(WEIGHTS_NAME, variant)} but there is a file for Flax weights. Use `from_flax=True` to load this model from those weights.') elif variant is not None and has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant {variant}. 
Use `variant=None` to load this model from those weights.') else: raise EnvironmentError(f'{pretrained_model_name_or_path} does not appear to have a file named {_add_variant(WEIGHTS_NAME, variant)}, {_add_variant(SAFE_WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}.') except EnvironmentError: raise except Exception as e: raise EnvironmentError(f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named {_add_variant(WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}.") from e if is_local: logger.info(f'loading weights file {archive_file}') resolved_archive_file = archive_file else: logger.info(f'loading weights file {filename} from cache at {resolved_archive_file}') elif gguf_file: from .modeling_gguf_pytorch_utils import load_gguf_checkpoint if os.path.isfile(gguf_file): gguf_path = gguf_file else: cached_file_kwargs = {'cache_dir': cache_dir, 'force_download': force_download, 'proxies': proxies, 'resume_download': resume_download, 'local_files_only': local_files_only, 'token': token, 'user_agent': user_agent, 'revision': revision, 'subfolder': subfolder, '_raise_exceptions_for_gated_repo': False, '_raise_exceptions_for_missing_entries': False, '_commit_hash': commit_hash} gguf_path = cached_file(pretrained_model_name_or_path, gguf_file, **cached_file_kwargs) state_dict = load_gguf_checkpoint(gguf_path, return_tensors=True)['tensors'] resolved_archive_file = None is_sharded = False else: resolved_archive_file = None if is_sharded: (resolved_archive_file, sharded_metadata) = get_checkpoint_shard_files(pretrained_model_name_or_path, resolved_archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=commit_hash) if is_safetensors_available() and isinstance(resolved_archive_file, str) and resolved_archive_file.endswith('.safetensors'): with safe_open(resolved_archive_file, framework='pt') as f: metadata = f.metadata() if metadata.get('format') == 'pt': pass elif metadata.get('format') == 'tf': from_tf = True logger.info('A TensorFlow safetensors file is being loaded in a PyTorch model.') elif metadata.get('format') == 'flax': from_flax = True logger.info('A Flax safetensors file is being loaded in a PyTorch model.') elif metadata.get('format') == 'mlx': pass else: raise ValueError(f"Incompatible safetensors file. 
File metadata is not ['pt', 'tf', 'flax', 'mlx'] but {metadata.get('format')}") from_pt = not from_tf | from_flax if from_pt: if not is_sharded and state_dict is None: state_dict = load_state_dict(resolved_archive_file) dtype_orig = None if torch_dtype is not None: if isinstance(torch_dtype, str): if torch_dtype == 'auto': if hasattr(config, 'torch_dtype') and config.torch_dtype is not None: torch_dtype = config.torch_dtype logger.info(f"Will use torch_dtype={torch_dtype} as defined in model's config object") else: if is_sharded and 'dtype' in sharded_metadata: torch_dtype = sharded_metadata['dtype'] elif not is_sharded: torch_dtype = get_state_dict_dtype(state_dict) else: one_state_dict = load_state_dict(resolved_archive_file[0]) torch_dtype = get_state_dict_dtype(one_state_dict) del one_state_dict logger.info("Since the `torch_dtype` attribute can't be found in model's config object, will use torch_dtype={torch_dtype} as derived from model's weights") elif hasattr(torch, torch_dtype): torch_dtype = getattr(torch, torch_dtype) else: raise ValueError(f'`torch_dtype` can be one of: `torch.dtype`, `"auto"` or a string of a valid `torch.dtype`, but received {torch_dtype}') dtype_orig = cls._set_default_torch_dtype(torch_dtype) use_keep_in_fp32_modules = cls._keep_in_fp32_modules is not None and (torch_dtype == torch.float16 or hasattr(hf_quantizer, 'use_keep_in_fp32_modules')) if is_sharded: loaded_state_dict_keys = sharded_metadata['all_checkpoint_keys'] else: loaded_state_dict_keys = list(state_dict.keys()) if gguf_path is None and (low_cpu_mem_usage or (use_keep_in_fp32_modules and is_accelerate_available())): state_dict = None config.name_or_path = pretrained_model_name_or_path init_contexts = [no_init_weights(_enable=_fast_init)] if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed logger.info('Detected DeepSpeed ZeRO-3: activating zero.init() for this model') init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config())] + init_contexts elif low_cpu_mem_usage: init_contexts.append(init_empty_weights()) config = copy.deepcopy(config) config = cls._autoset_attn_implementation(config, use_flash_attention_2=use_flash_attention_2, torch_dtype=torch_dtype, device_map=device_map) with ContextManagers(init_contexts): model = cls(config, *model_args, **model_kwargs) config = model.config if use_keep_in_fp32_modules: if is_accelerate_available() and (not is_deepspeed_zero3_enabled()): low_cpu_mem_usage = True keep_in_fp32_modules = model._keep_in_fp32_modules else: keep_in_fp32_modules = [] if hf_quantizer is not None: hf_quantizer.preprocess_model(model=model, device_map=device_map, keep_in_fp32_modules=keep_in_fp32_modules) config._pre_quantization_dtype = torch_dtype if isinstance(device_map, str): special_dtypes = {} if hf_quantizer is not None: special_dtypes.update(hf_quantizer.get_special_dtypes_update(model, torch_dtype)) special_dtypes.update({name: torch.float32 for (name, _) in model.named_parameters() if any((m in name for m in keep_in_fp32_modules))}) target_dtype = torch_dtype if hf_quantizer is not None: target_dtype = hf_quantizer.adjust_target_dtype(target_dtype) no_split_modules = model._get_no_split_modules(device_map) if device_map not in ['auto', 'balanced', 'balanced_low_0', 'sequential']: raise ValueError("If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or 'sequential'.") device_map_kwargs = {'no_split_module_classes': no_split_modules} if 'special_dtypes' in 
inspect.signature(infer_auto_device_map).parameters: device_map_kwargs['special_dtypes'] = special_dtypes elif len(special_dtypes) > 0: logger.warning('This model has some weights that should be kept in higher precision, you need to upgrade `accelerate` to properly deal with them (`pip install --upgrade accelerate`).') if device_map != 'sequential': max_memory = get_balanced_memory(model, dtype=target_dtype, low_zero=device_map == 'balanced_low_0', max_memory=max_memory, **device_map_kwargs) else: max_memory = get_max_memory(max_memory) if hf_quantizer is not None: max_memory = hf_quantizer.adjust_max_memory(max_memory) device_map_kwargs['max_memory'] = max_memory model.tie_weights() device_map = infer_auto_device_map(model, dtype=target_dtype, **device_map_kwargs) if hf_quantizer is not None: hf_quantizer.validate_environment(device_map=device_map) elif device_map is not None: model.tie_weights() tied_params = find_tied_parameters(model) check_tied_parameters_on_same_device(tied_params, device_map) if from_tf: if resolved_archive_file.endswith('.index'): model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) else: try: from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model (model, loading_info) = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True, output_loading_info=True) except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.') raise elif from_flax: try: from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file) except ImportError: logger.error('Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. 
Please see https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions.') raise elif from_pt: if dtype_orig is not None: torch.set_default_dtype(dtype_orig) (model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs) = cls._load_pretrained_model(model, state_dict, loaded_state_dict_keys, resolved_archive_file, pretrained_model_name_or_path, ignore_mismatched_sizes=ignore_mismatched_sizes, sharded_metadata=sharded_metadata, _fast_init=_fast_init, low_cpu_mem_usage=low_cpu_mem_usage, device_map=device_map, offload_folder=offload_folder, offload_state_dict=offload_state_dict, dtype=torch_dtype, hf_quantizer=hf_quantizer, keep_in_fp32_modules=keep_in_fp32_modules, gguf_path=gguf_path) model.tie_weights() model.eval() if model.can_generate() and generation_config is not None: logger.info('The user-defined `generation_config` will be used to override the default generation config.') model.generation_config = model.generation_config.from_dict(generation_config.to_dict()) elif model.can_generate() and pretrained_model_name_or_path is not None: try: model.generation_config = GenerationConfig.from_pretrained(pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs) except OSError: logger.info('Generation config file not found, using a generation config created from the model config.') pass if device_map is not None: device_map_kwargs = {'device_map': device_map, 'offload_dir': offload_folder, 'offload_index': offload_index, 'offload_buffers': offload_buffers} if 'skip_keys' in inspect.signature(dispatch_model).parameters: device_map_kwargs['skip_keys'] = model._skip_keys_device_placement if 'force_hooks' in inspect.signature(dispatch_model).parameters and hf_quantizer is not None and (hf_quantizer.quantization_config.quant_method == QuantizationMethod.HQQ): device_map_kwargs['force_hooks'] = True if hf_quantizer is not None and hf_quantizer.quantization_config.quant_method == QuantizationMethod.FBGEMM_FP8 and isinstance(device_map, dict) and ('cpu' in device_map.values() or 'disk' in device_map.values()): device_map_kwargs['offload_buffers'] = True if not is_fsdp_enabled() and (not is_deepspeed_zero3_enabled()): dispatch_model(model, **device_map_kwargs) if hf_quantizer is not None: hf_quantizer.postprocess_model(model) model.hf_quantizer = hf_quantizer if _adapter_model_path is not None: model.load_adapter(_adapter_model_path, adapter_name=adapter_name, token=token, adapter_kwargs=adapter_kwargs) if output_loading_info: if loading_info is None: loading_info = {'missing_keys': missing_keys, 'unexpected_keys': unexpected_keys, 'mismatched_keys': mismatched_keys, 'error_msgs': error_msgs} return (model, loading_info) return model @classmethod def _load_pretrained_model(cls, model, state_dict, loaded_keys, resolved_archive_file, pretrained_model_name_or_path, ignore_mismatched_sizes=False, sharded_metadata=None, _fast_init=True, low_cpu_mem_usage=False, device_map=None, offload_folder=None, offload_state_dict=None, dtype=None, hf_quantizer=None, keep_in_fp32_modules=None, gguf_path=None): is_safetensors = False is_quantized = hf_quantizer is not None state_dict_folder = None state_dict_index = None if device_map is not None and 'disk' in device_map.values(): archive_file = resolved_archive_file[0] if 
isinstance(resolved_archive_file, (list, tuple)) else resolved_archive_file is_safetensors = archive_file.endswith('.safetensors') if offload_folder is None and (not is_safetensors): raise ValueError('The current `device_map` had weights offloaded to the disk. Please provide an `offload_folder` for them. Alternatively, make sure you have `safetensors` installed if the model you are using offers the weights in this format.') if offload_folder is not None: os.makedirs(offload_folder, exist_ok=True) if offload_state_dict is None: offload_state_dict = True is_sharded_safetensors = is_safetensors and sharded_metadata is not None model.tie_weights() model_state_dict = model.state_dict() expected_keys = list(model_state_dict.keys()) prefix = model.base_model_prefix def _fix_key(key): if 'beta' in key: return key.replace('beta', 'bias') if 'gamma' in key: return key.replace('gamma', 'weight') return key original_loaded_keys = loaded_keys loaded_keys = [_fix_key(key) for key in loaded_keys] if len(prefix) > 0: has_prefix_module = any((s.startswith(prefix) for s in loaded_keys)) expects_prefix_module = any((s.startswith(prefix) for s in expected_keys)) else: has_prefix_module = False expects_prefix_module = False remove_prefix_from_model = not has_prefix_module and expects_prefix_module add_prefix_to_model = has_prefix_module and (not expects_prefix_module) if remove_prefix_from_model: _prefix = f'{prefix}.' expected_keys_not_prefixed = [s for s in expected_keys if not s.startswith(_prefix)] expected_keys = [s[len(_prefix):] if s.startswith(_prefix) else s for s in expected_keys] elif add_prefix_to_model: expected_keys = ['.'.join([prefix, s]) for s in expected_keys] missing_keys = sorted(set(expected_keys) - set(loaded_keys)) unexpected_keys = set(loaded_keys) - set(expected_keys) model_buffers = {n for (n, _) in model.named_buffers()} if remove_prefix_from_model: model_buffers = {key[len(_prefix):] if key.startswith(_prefix) else key for key in model_buffers} elif add_prefix_to_model: model_buffers = {'.'.join([prefix, key]) for key in model_buffers} unexpected_keys = sorted(unexpected_keys - model_buffers) model.tie_weights() if device_map is None and (not is_fsdp_enabled()) and (not is_deepspeed_zero3_enabled()): ptrs = collections.defaultdict(list) for (name, tensor) in model.state_dict().items(): id_tensor = id_tensor_storage(tensor) ptrs[id_tensor].append(name) tied_params = [names for (_, names) in ptrs.items() if len(names) > 1] else: tied_params = find_tied_parameters(model) for group in tied_params: if remove_prefix_from_model: group = [key[len(_prefix):] if key.startswith(_prefix) else key for key in group] elif add_prefix_to_model: group = ['.'.join([prefix, key]) for key in group] missing_in_group = [k for k in missing_keys if k in group] if len(missing_in_group) > 0 and len(missing_in_group) < len(group): missing_keys = [k for k in missing_keys if k not in missing_in_group] if cls._keys_to_ignore_on_load_missing is not None: for pat in cls._keys_to_ignore_on_load_missing: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if cls._keys_to_ignore_on_load_unexpected is not None: for pat in cls._keys_to_ignore_on_load_unexpected: unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None] if hf_quantizer is not None: missing_keys = hf_quantizer.update_missing_keys(model, missing_keys, prefix) if low_cpu_mem_usage: for key in missing_keys: if key in list(model_state_dict.keys()): key = key elif f'{prefix}.{key}' in list(model_state_dict.keys()): key = 
f'{prefix}.{key}' elif key.startswith(prefix) and '.'.join(key.split('.')[1:]) in list(model_state_dict.keys()): key = '.'.join(key.split('.')[1:]) param = model_state_dict[key] target_dtype = dtype if keep_in_fp32_modules is not None and dtype == torch.float16 and any((module_to_keep_in_fp32 in key.split('.') for module_to_keep_in_fp32 in keep_in_fp32_modules)): target_dtype = torch.float32 if param.device == torch.device('meta'): value = torch.empty(*param.size(), dtype=target_dtype) if not is_quantized or getattr(hf_quantizer, 'requires_parameters_quantization', False) or (not hf_quantizer.check_quantized_param(model, param_value=value, param_name=key, state_dict={})): set_module_tensor_to_device(model, key, 'cpu', value) else: hf_quantizer.create_quantized_param(model, value, key, 'cpu', state_dict, unexpected_keys) if _fast_init: if not ignore_mismatched_sizes: if remove_prefix_from_model: _loaded_keys = [f'{prefix}.{k}' for k in loaded_keys] elif add_prefix_to_model: _loaded_keys = [k[len(prefix) + 1:] for k in loaded_keys] else: _loaded_keys = loaded_keys not_initialized_submodules = set_initialized_submodules(model, _loaded_keys) if hasattr(model.config, 'tie_word_embeddings') and model.config.tie_word_embeddings: output_embeddings = model.get_output_embeddings() if output_embeddings is not None: if not hasattr(output_embeddings, 'bias') or output_embeddings.bias is None: output_embeddings._is_hf_initialized = True else: not_initialized_submodules = dict(model.named_modules()) if is_deepspeed_zero3_enabled() and (not is_quantized): import deepspeed not_initialized_parameters = list(set(itertools.chain.from_iterable((submodule.parameters(recurse=False) for submodule in not_initialized_submodules.values())))) with deepspeed.zero.GatheredParameters(not_initialized_parameters, modifier_rank=0): model.apply(model._initialize_weights) else: model.apply(model._initialize_weights) if keep_in_fp32_modules is not None: for (name, param) in model.named_parameters(): if any((module_to_keep_in_fp32 in name.split('.') for module_to_keep_in_fp32 in keep_in_fp32_modules)): param.data = param.data.to(torch.float32) start_prefix = '' model_to_load = model if len(cls.base_model_prefix) > 0 and (not hasattr(model, cls.base_model_prefix)) and has_prefix_module: start_prefix = cls.base_model_prefix + '.' if len(cls.base_model_prefix) > 0 and hasattr(model, cls.base_model_prefix) and (not has_prefix_module): model_to_load = getattr(model, cls.base_model_prefix) base_model_expected_keys = list(model_to_load.state_dict().keys()) if any((key in expected_keys_not_prefixed and key not in base_model_expected_keys for key in loaded_keys)): raise ValueError('The state dictionary of the model you are trying to load is corrupted. 
Are you sure it was properly saved?') if device_map is not None: device_map = {k.replace(f'{cls.base_model_prefix}.', ''): v for (k, v) in device_map.items()} def _find_mismatched_keys(state_dict, model_state_dict, loaded_keys, add_prefix_to_model, remove_prefix_from_model, ignore_mismatched_sizes): mismatched_keys = [] if ignore_mismatched_sizes: for checkpoint_key in loaded_keys: if checkpoint_key not in state_dict: continue model_key = checkpoint_key if remove_prefix_from_model: model_key = f'{prefix}.{checkpoint_key}' elif add_prefix_to_model: model_key = '.'.join(checkpoint_key.split('.')[1:]) if model_key in model_state_dict and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape: if state_dict[checkpoint_key].shape[-1] == 1 and state_dict[checkpoint_key].numel() * 2 == model_state_dict[model_key].numel(): pass else: mismatched_keys.append((checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)) del state_dict[checkpoint_key] return mismatched_keys if resolved_archive_file is not None: folder = os.path.sep.join(resolved_archive_file[0].split(os.path.sep)[:-1]) else: folder = None if device_map is not None and is_safetensors: param_device_map = expand_device_map(device_map, original_loaded_keys, start_prefix) str_dtype = str(dtype).replace('torch.', '') if dtype is not None else 'float32' if sharded_metadata is None: archive_file = resolved_archive_file[0] if isinstance(resolved_archive_file, (list, tuple)) else resolved_archive_file weight_map = {p: archive_file for p in original_loaded_keys} else: weight_map = {p: os.path.join(folder, f) for (p, f) in sharded_metadata['weight_map'].items()} offload_index = {p[len(start_prefix):]: {'safetensors_file': f, 'weight_name': p, 'dtype': str_dtype} for (p, f) in weight_map.items() if p.startswith(start_prefix) and param_device_map[p[len(start_prefix):]] == 'disk'} else: offload_index = None if state_dict is not None: mismatched_keys = _find_mismatched_keys(state_dict, model_state_dict, original_loaded_keys, add_prefix_to_model, remove_prefix_from_model, ignore_mismatched_sizes) if gguf_path: (error_msgs, offload_index, state_dict_index) = _load_state_dict_into_meta_model(model_to_load, state_dict, loaded_keys, start_prefix, expected_keys, device_map=device_map, offload_folder=offload_folder, offload_index=offload_index, state_dict_folder=state_dict_folder, state_dict_index=state_dict_index, dtype=dtype, hf_quantizer=hf_quantizer, is_safetensors=is_safetensors, keep_in_fp32_modules=keep_in_fp32_modules, unexpected_keys=unexpected_keys) else: assign_to_params_buffers = check_support_param_buffer_assignment(model_to_load, state_dict, start_prefix) error_msgs = _load_state_dict_into_model(model_to_load, state_dict, start_prefix, assign_to_params_buffers) else: if not isinstance(resolved_archive_file, list): resolved_archive_file = [resolved_archive_file] error_msgs = [] mismatched_keys = [] if not is_safetensors: offload_index = {} if device_map is not None and 'disk' in device_map.values() else None if offload_state_dict: state_dict_folder = tempfile.mkdtemp() state_dict_index = {} else: state_dict_folder = None state_dict_index = None if is_sharded_safetensors: disk_only_shard_files = get_disk_only_shard_files(device_map, sharded_metadata=sharded_metadata, start_prefix=start_prefix) disk_only_shard_files = [os.path.join(folder, f) for f in disk_only_shard_files] else: disk_only_shard_files = [] if len(resolved_archive_file) > 1: resolved_archive_file = logging.tqdm(resolved_archive_file, 
desc='Loading checkpoint shards') assign_to_params_buffers = None for shard_file in resolved_archive_file: if shard_file in disk_only_shard_files: continue state_dict = load_state_dict(shard_file, is_quantized=is_quantized) mismatched_keys += _find_mismatched_keys(state_dict, model_state_dict, original_loaded_keys, add_prefix_to_model, remove_prefix_from_model, ignore_mismatched_sizes) if low_cpu_mem_usage: if is_fsdp_enabled() and (not is_local_dist_rank_0()) and (not is_quantized): for (key, param) in model_to_load.state_dict().items(): if param.device == torch.device('meta'): set_module_tensor_to_device(model_to_load, key, 'cpu', torch.empty(*param.size(), dtype=dtype)) else: (new_error_msgs, offload_index, state_dict_index) = _load_state_dict_into_meta_model(model_to_load, state_dict, loaded_keys, start_prefix, expected_keys, device_map=device_map, offload_folder=offload_folder, offload_index=offload_index, state_dict_folder=state_dict_folder, state_dict_index=state_dict_index, dtype=dtype, hf_quantizer=hf_quantizer, is_safetensors=is_safetensors, keep_in_fp32_modules=keep_in_fp32_modules, unexpected_keys=unexpected_keys) error_msgs += new_error_msgs else: if assign_to_params_buffers is None: assign_to_params_buffers = check_support_param_buffer_assignment(model_to_load, state_dict, start_prefix) error_msgs += _load_state_dict_into_model(model_to_load, state_dict, start_prefix, assign_to_params_buffers) del state_dict gc.collect() if offload_index is not None and len(offload_index) > 0: if model != model_to_load: prefix = cls.base_model_prefix if not is_safetensors: for weight_name in offload_index: shutil.move(os.path.join(offload_folder, f'{weight_name}.dat'), os.path.join(offload_folder, f'{prefix}.{weight_name}.dat')) offload_index = {f'{prefix}.{key}': value for (key, value) in offload_index.items()} if not is_safetensors: save_offload_index(offload_index, offload_folder) offload_index = None if offload_state_dict: load_offloaded_weights(model_to_load, state_dict_index, state_dict_folder) shutil.rmtree(state_dict_folder) if len(error_msgs) > 0: error_msg = '\n\t'.join(error_msgs) if 'size mismatch' in error_msg: error_msg += '\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.' raise RuntimeError(f'Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}') if len(unexpected_keys) > 0: archs = [] if model.config.architectures is None else model.config.architectures warner = logger.warning if model.__class__.__name__ in archs else logger.info warner(f'Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).') else: logger.info(f'All model checkpoint weights were used when initializing {model.__class__.__name__}.\n') if len(missing_keys) > 0: logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') elif len(mismatched_keys) == 0: logger.info(f'All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint was trained on, you can already use {model.__class__.__name__} for predictions without further training.') if len(mismatched_keys) > 0: mismatched_warning = '\n'.join([f'- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated' for (key, shape1, shape2) in mismatched_keys]) logger.warning(f'Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} and are newly initialized because the shapes did not match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.') return (model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs) def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False): module_keys = {'.'.join(key.split('.')[:-1]) for key in names} module_keys = module_keys.union({'.'.join(key.split('.')[:-2]) for key in names if len(key) > 0 and key[-1].isdigit()}) retrieved_modules = [] for (name, module) in self.named_modules(): if remove_prefix: _prefix = f'{self.base_model_prefix}.' 
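# --- Illustrative usage sketch (not part of this module). The missing/unexpected/
# mismatched key bookkeeping computed in _load_pretrained_model above is exactly what
# callers see through `output_loading_info=True`; `ignore_mismatched_sizes=True` turns a
# shape mismatch into an entry of `mismatched_keys` instead of a "size mismatch"
# RuntimeError. The checkpoint id and num_labels below are placeholders, not values
# used by this file.
from transformers import AutoModelForSequenceClassification

model, loading_info = AutoModelForSequenceClassification.from_pretrained(
    "bert-base-uncased",           # placeholder checkpoint
    num_labels=7,                  # desired head size; may differ from, or be absent in, the checkpoint
    ignore_mismatched_sizes=True,  # report, rather than raise on, differently shaped weights
    output_loading_info=True,      # return the dict assembled in from_pretrained above
)
print(loading_info["missing_keys"])     # weights that were newly initialized
print(loading_info["unexpected_keys"])  # checkpoint weights that were not used
print(loading_info["mismatched_keys"])  # (key, checkpoint shape, model shape) triples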
name = name[len(_prefix):] if name.startswith(_prefix) else name elif add_prefix: name = '.'.join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix if name in module_keys: retrieved_modules.append(module) return retrieved_modules @staticmethod def _load_pretrained_model_low_mem(model, loaded_state_dict_keys, resolved_archive_file, start_prefix='', hf_quantizer=None, pretrained_model_name_or_path=None): _move_model_to_meta(model, loaded_state_dict_keys, start_prefix) state_dict = load_state_dict(resolved_archive_file) expected_keys = loaded_state_dict_keys error_msgs = _load_state_dict_into_meta_model(model, state_dict, loaded_state_dict_keys, start_prefix, expected_keys=expected_keys, hf_quantizer=hf_quantizer) return error_msgs @classmethod def register_for_auto_class(cls, auto_class='AutoModel'): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f'{auto_class} is not a valid auto class.') cls._auto_class = auto_class def to_bettertransformer(self) -> 'PreTrainedModel': if not is_optimum_available(): raise ImportError('The package `optimum` is required to use Better Transformer.') from optimum.version import __version__ as optimum_version if version.parse(optimum_version) < version.parse('1.7.0'): raise ImportError(f'Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found.') from optimum.bettertransformer import BetterTransformer return BetterTransformer.transform(self) def reverse_bettertransformer(self): if not is_optimum_available(): raise ImportError('The package `optimum` is required to use Better Transformer.') from optimum.version import __version__ as optimum_version if version.parse(optimum_version) < version.parse('1.7.0'): raise ImportError(f'Please install optimum>=1.7.0 to use Better Transformer. The version {optimum_version} was found.') from optimum.bettertransformer import BetterTransformer return BetterTransformer.reverse(self) def warn_if_padding_and_no_attention_mask(self, input_ids, attention_mask): if is_torch_fx_proxy(input_ids) or torch.jit.is_tracing() or is_torchdynamo_compiling(): return if attention_mask is not None or self.config.pad_token_id is None: return if self.config.pad_token_id in input_ids[:, [-1, 0]]: warn_string = 'We strongly recommend passing in an `attention_mask` since your input_ids may be padded. See https://huggingface.co/docs/transformers/troubleshooting#incorrect-output-when-padding-tokens-arent-masked.' if self.config.bos_token_id is not None and self.config.bos_token_id == self.config.pad_token_id or (self.config.eos_token_id is not None and self.config.eos_token_id == self.config.pad_token_id) or (self.config.sep_token_id is not None and self.config.sep_token_id == self.config.pad_token_id): warn_string += f'\nYou may ignore this warning if your `pad_token_id` ({self.config.pad_token_id}) is identical to the `bos_token_id` ({self.config.bos_token_id}), `eos_token_id` ({self.config.eos_token_id}), or the `sep_token_id` ({self.config.sep_token_id}), and your input is not padded.' logger.warning_once(warn_string) @property def _is_quantized_training_enabled(self): warnings.warn('`_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. 
Please use `model.hf_quantizer.is_trainable` instead', FutureWarning) if not hasattr(self, 'hf_quantizer'): return False return self.hf_quantizer.is_trainable PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) if PreTrainedModel.push_to_hub.__doc__ is not None: PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format(object='model', object_class='AutoModel', object_files='model file') class PoolerStartLogits(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, 1) def forward(self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor: x = self.dense(hidden_states).squeeze(-1) if p_mask is not None: if get_parameter_dtype(self) == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e+30 * p_mask return x class PoolerEndLogits(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense_1 = nn.Linear(config.hidden_size, 1) def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor: assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None' if start_positions is not None: (slen, hsz) = hidden_states.shape[-2:] start_positions = start_positions[:, None, None].expand(-1, -1, hsz) start_states = hidden_states.gather(-2, start_positions) start_states = start_states.expand(-1, slen, -1) x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) x = self.activation(x) x = self.LayerNorm(x) x = self.dense_1(x).squeeze(-1) if p_mask is not None: if get_parameter_dtype(self) == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e+30 * p_mask return x class PoolerAnswerClass(nn.Module): def __init__(self, config): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor: hsz = hidden_states.shape[-1] assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None' if start_positions is not None: start_positions = start_positions[:, None, None].expand(-1, -1, hsz) start_states = hidden_states.gather(-2, start_positions).squeeze(-2) if cls_index is not None: cls_index = cls_index[:, None, None].expand(-1, -1, hsz) cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) else: cls_token_state = hidden_states[:, -1, :] x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) x = self.activation(x) x = self.dense_1(x).squeeze(-1) return x @dataclass class SquadHeadOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = 
None class SQuADHead(nn.Module): def __init__(self, config): super().__init__() self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.start_logits = PoolerStartLogits(config) self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig) def forward(self, hidden_states: torch.FloatTensor, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, cls_index: Optional[torch.LongTensor]=None, is_impossible: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None, return_dict: bool=False) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]: start_logits = self.start_logits(hidden_states, p_mask=p_mask) if start_positions is not None and end_positions is not None: for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) total_loss += cls_loss * 0.5 return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,) else: (bsz, slen, hsz) = hidden_states.size() start_log_probs = nn.functional.softmax(start_logits, dim=-1) (start_top_log_probs, start_top_index) = torch.topk(start_log_probs, self.start_n_top, dim=-1) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = nn.functional.softmax(end_logits, dim=1) (end_top_log_probs, end_top_index) = torch.topk(end_log_probs, self.end_n_top, dim=1) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum('blh,bl->bh', hidden_states, start_log_probs) cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) if not return_dict: return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) else: return SquadHeadOutput(start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits) class SequenceSummary(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() self.summary_type = getattr(config, 'summary_type', 'last') if self.summary_type == 'attn': raise NotImplementedError self.summary = Identity() if hasattr(config, 'summary_use_proj') and config.summary_use_proj: if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and (config.num_labels > 0): num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) 
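# --- Illustrative sketch (toy shapes, not part of this class): the pooling strategies
# SequenceSummary chooses between, written out with plain tensor ops so the
# `summary_type` options are easy to compare.
import torch

hidden_states = torch.randn(2, 5, 8)   # (batch, seq_len, hidden_size)
cls_index = torch.tensor([4, 2])       # per-example position to pool from

last = hidden_states[:, -1]            # summary_type == 'last'
first = hidden_states[:, 0]            # summary_type == 'first'
mean = hidden_states.mean(dim=1)       # summary_type == 'mean'

# summary_type == 'cls_index': gather one position per batch element, using the same
# unsqueeze/expand/gather shape manipulation as forward() below.
idx = cls_index[:, None, None].expand(-1, 1, hidden_states.size(-1))
cls_pooled = hidden_states.gather(-2, idx).squeeze(-2)

assert last.shape == first.shape == mean.shape == cls_pooled.shape == (2, 8)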
activation_string = getattr(config, 'summary_activation', None) self.activation: Callable = get_activation(activation_string) if activation_string else Identity() self.first_dropout = Identity() if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = Identity() if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward(self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor: if self.summary_type == 'last': output = hidden_states[:, -1] elif self.summary_type == 'first': output = hidden_states[:, 0] elif self.summary_type == 'mean': output = hidden_states.mean(dim=1) elif self.summary_type == 'cls_index': if cls_index is None: cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) output = hidden_states.gather(-2, cls_index).squeeze(-2) elif self.summary_type == 'attn': raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output def unwrap_model(model: nn.Module, recursive: bool=False) -> nn.Module: if is_accelerate_available(): kwargs = {} if recursive: if not is_accelerate_available('0.29.0'): raise RuntimeError('Setting `recursive=True` to `unwrap_model` requires `accelerate` v0.29.0. Please upgrade your version of accelerate') else: kwargs['recursive'] = recursive return extract_model_from_parallel(model, **kwargs) elif hasattr(model, 'module'): return unwrap_model(model.module) else: return model def expand_device_map(device_map, param_names, start_prefix): new_device_map = {} param_names = [p[len(start_prefix):] for p in param_names if p.startswith(start_prefix)] for (module, device) in device_map.items(): new_device_map.update({p: device for p in param_names if p == module or p.startswith(f'{module}.') or module == ''}) return new_device_map def get_disk_only_shard_files(device_map, sharded_metadata, start_prefix): weight_map = {p[len(start_prefix):]: v for (p, v) in sharded_metadata['weight_map'].items() if p.startswith(start_prefix)} files_content = collections.defaultdict(list) for (weight_name, filename) in weight_map.items(): while len(weight_name) > 0 and weight_name not in device_map: weight_name = '.'.join(weight_name.split('.')[:-1]) files_content[filename].append(device_map[weight_name]) return [fname for (fname, devices) in files_content.items() if set(devices) == {'disk'}] # File: transformers-main/src/transformers/models/__init__.py from . 
import albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_2, bloom, bridgetower, bros, byt5, camembert, canine, chameleon, chinese_clip, clap, clip, clipseg, clvp, code_llama, codegen, cohere, conditional_detr, convbert, convnext, convnextv2, cpm, cpmant, ctrl, cvt, dac, data2vec, dbrx, deberta, deberta_v2, decision_transformer, deformable_detr, deit, deprecated, depth_anything, detr, dialogpt, dinat, dinov2, distilbert, dit, donut, dpr, dpt, efficientnet, electra, encodec, encoder_decoder, ernie, esm, falcon, falcon_mamba, fastspeech2_conformer, flaubert, flava, fnet, focalnet, fsmt, funnel, fuyu, gemma, gemma2, git, glpn, gpt2, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_sw3, gptj, granite, grounding_dino, groupvit, herbert, hiera, hubert, ibert, idefics, idefics2, imagegpt, informer, instructblip, instructblipvideo, jamba, jetmoe, kosmos2, layoutlm, layoutlmv2, layoutlmv3, layoutxlm, led, levit, lilt, llama, llava, llava_next, llava_next_video, llava_onevision, longformer, longt5, luke, lxmert, m2m_100, mamba, mamba2, marian, markuplm, mask2former, maskformer, mbart, mbart50, megatron_bert, megatron_gpt2, mgp_str, mistral, mixtral, mluke, mobilebert, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, mpnet, mpt, mra, mt5, musicgen, musicgen_melody, mvp, nemotron, nllb, nllb_moe, nougat, nystromformer, olmo, olmoe, oneformer, openai, opt, owlv2, owlvit, paligemma, patchtsmixer, patchtst, pegasus, pegasus_x, perceiver, persimmon, phi, phi3, phobert, pix2struct, pixtral, plbart, poolformer, pop2piano, prophetnet, pvt, pvt_v2, qwen2, qwen2_audio, qwen2_moe, qwen2_vl, rag, recurrent_gemma, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rt_detr, rwkv, sam, seamless_m4t, seamless_m4t_v2, segformer, seggpt, sew, sew_d, siglip, speech_encoder_decoder, speech_to_text, speecht5, splinter, squeezebert, stablelm, starcoder2, superpoint, swiftformer, swin, swin2sr, swinv2, switch_transformers, t5, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, trocr, tvp, udop, umt5, unispeech, unispeech_sat, univnet, upernet, video_llava, videomae, vilt, vipllava, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_mae, vit_msn, vitdet, vitmatte, vits, vivit, wav2vec2, wav2vec2_bert, wav2vec2_conformer, wav2vec2_phoneme, wav2vec2_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, zoedepth # File: transformers-main/src/transformers/models/albert/__init__.py from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_albert import * from .modeling_albert import * from .modeling_flax_albert import * from .modeling_tf_albert import * from .tokenization_albert import * from .tokenization_albert_fast import * else: import sys _file = globals()['__file__'] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) # File: transformers-main/src/transformers/models/albert/configuration_albert.py """""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig class AlbertConfig(PretrainedConfig): model_type = 'albert' def __init__(self, vocab_size=30000, 
embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act='gelu_new', hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type='absolute', pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_hidden_groups = num_hidden_groups self.num_attention_heads = num_attention_heads self.inner_group_num = inner_group_num self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.classifier_dropout_prob = classifier_dropout_prob self.position_embedding_type = position_embedding_type class AlbertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)]) __all__ = ['AlbertConfig', 'AlbertOnnxConfig'] # File: transformers-main/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py """""" import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path): config = AlbertConfig.from_json_file(albert_config_file) print(f'Building PyTorch model from configuration: {config}') model = AlbertForPreTraining(config) load_tf_weights_in_albert(model, config, tf_checkpoint_path) print(f'Save PyTorch model to {pytorch_dump_path}') torch.save(model.state_dict(), pytorch_dump_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.') parser.add_argument('--albert_config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained ALBERT model. 
\nThis specifies the model architecture.') parser.add_argument('--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path) # File: transformers-main/src/transformers/models/albert/modeling_albert.py """""" import math import os from dataclasses import dataclass from typing import Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, is_torch_greater_or_equal_than_2_2, prune_linear_layer from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_albert import AlbertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'albert/albert-base-v2' _CONFIG_FOR_DOC = 'AlbertConfig' def load_tf_weights_in_albert(model, config, tf_checkpoint_path): try: import re import numpy as np import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}') array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for (name, array) in zip(names, arrays): print(name) for (name, array) in zip(names, arrays): original_name = name name = name.replace('module/', '') name = name.replace('ffn_1', 'ffn') name = name.replace('bert/', 'albert/') name = name.replace('attention_1', 'attention') name = name.replace('transform/', '') name = name.replace('LayerNorm_1', 'full_layer_layer_norm') name = name.replace('LayerNorm', 'attention/LayerNorm') name = name.replace('transformer/', '') name = name.replace('intermediate/dense/', '') name = name.replace('ffn/intermediate/output/dense/', 'ffn_output/') name = name.replace('/output/', '/') name = name.replace('/self/', '/') name = name.replace('pooler/dense', 'pooler') name = name.replace('cls/predictions', 'predictions') name = name.replace('predictions/attention', 'predictions') name = name.replace('embeddings/attention', 'embeddings') name = name.replace('inner_group_', 'albert_layers/') name = name.replace('group_', 'albert_layer_groups/') if len(name.split('/')) == 1 and ('output_bias' in name or 'output_weights' in name): name = 'classifier/' + name if 'seq_relationship' in name: name = name.replace('seq_relationship/output_', 'sop_classifier/classifier/') name = name.replace('weights', 'weight') name = name.split('/') if 'adam_m' in name or 'adam_v' in name or 'AdamWeightDecayOptimizer' in name or ('AdamWeightDecayOptimizer_1' in name) or ('global_step' in name): logger.info(f"Skipping {'/'.join(name)}") 
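# adam_m / adam_v are Adam optimizer slot variables and global_step is the training
# counter; they have no counterpart in the PyTorch model, so the loop skips them and
# only transfers actual model weights.
#
# Usage sketch for the conversion path (paths are placeholders), via the CLI defined in
# convert_albert_original_tf_checkpoint_to_pytorch.py above:
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path  /path/to/albert/model.ckpt-best \
#       --albert_config_file  /path/to/albert/albert_config.json \
#       --pytorch_dump_path   /path/to/out/pytorch_model.bin
#
# or, equivalently, by calling convert_tf_checkpoint_to_pytorch(tf_checkpoint_path,
# albert_config_file, pytorch_dump_path) directly: it builds AlbertForPreTraining from
# the JSON config, runs this load_tf_weights_in_albert rename/transfer loop, and saves
# the resulting state dict with torch.save.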
continue pointer = model for m_name in name: if re.fullmatch('[A-Za-z]+_\\d+', m_name): scope_names = re.split('_(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'kernel' or scope_names[0] == 'gamma': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': pointer = getattr(pointer, 'bias') elif scope_names[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched') except ValueError as e: e.args += (pointer.shape, array.shape) raise print(f'Initialize PyTorch weight {name} from {original_name}') pointer.data = torch.from_numpy(array) return model class AlbertEmbeddings(nn.Module): def __init__(self, config: AlbertConfig): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class AlbertAttention(nn.Module): def __init__(self, config: AlbertConfig): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is 
not a multiple of the number of attention heads ({config.num_attention_heads}') self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.attention_head_size = config.hidden_size // config.num_attention_heads self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob) self.output_dropout = nn.Dropout(config.hidden_dropout_prob) self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pruned_heads = set() self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def prune_heads(self, heads: List[int]) -> None: if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads) self.query = prune_linear_layer(self.query, index) self.key = prune_linear_layer(self.key, index) self.value = prune_linear_layer(self.value, index) self.dense = prune_linear_layer(self.dense, index, dim=1) self.num_attention_heads = self.num_attention_heads - len(heads) self.all_head_size = self.attention_head_size * self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]: mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 
'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.attention_dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.transpose(2, 1).flatten(2) projected_context_layer = self.dense(context_layer) projected_context_layer_dropout = self.output_dropout(projected_context_layer) layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout) return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,) class AlbertSdpaAttention(AlbertAttention): def __init__(self, config): super().__init__(config) self.dropout_prob = config.attention_probs_dropout_prob self.require_contiguous_qkv = not is_torch_greater_or_equal_than_2_2 def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]: if self.position_embedding_type != 'absolute' or output_attentions or head_mask is not None: logger.warning('AlbertSdpaAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to the eager attention implementation, but specifying the eager implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states, attention_mask, head_mask, output_attentions) (batch_size, seq_len, _) = hidden_states.size() query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) if self.require_contiguous_qkv and query_layer.device.type == 'cuda' and (attention_mask is not None): query_layer = query_layer.contiguous() key_layer = key_layer.contiguous() value_layer = value_layer.contiguous() attention_output = torch.nn.functional.scaled_dot_product_attention(query=query_layer, key=key_layer, value=value_layer, attn_mask=attention_mask, dropout_p=self.dropout_prob if self.training else 0.0, is_causal=False) attention_output = attention_output.transpose(1, 2) attention_output = attention_output.reshape(batch_size, seq_len, self.all_head_size) projected_context_layer = self.dense(attention_output) projected_context_layer_dropout = self.output_dropout(projected_context_layer) layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout) return (layernormed_context_layer,) ALBERT_ATTENTION_CLASSES = {'eager': AlbertAttention, 'sdpa': AlbertSdpaAttention} class AlbertLayer(nn.Module): def __init__(self, config: AlbertConfig): super().__init__() self.config = config self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.attention = ALBERT_ATTENTION_CLASSES[config._attn_implementation](config) self.ffn = nn.Linear(config.hidden_size, config.intermediate_size) self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size) self.activation = ACT2FN[config.hidden_act] self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False, output_hidden_states: bool=False) -> Tuple[torch.Tensor, torch.Tensor]: attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions) ffn_output = apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[0]) hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0]) return (hidden_states,) + attention_output[1:] def ff_chunk(self, attention_output: torch.Tensor) -> torch.Tensor: ffn_output = self.ffn(attention_output) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(ffn_output) return ffn_output class AlbertLayerGroup(nn.Module): def __init__(self, config: AlbertConfig): super().__init__() self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)]) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False, output_hidden_states: bool=False) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: layer_hidden_states = () layer_attentions = () for (layer_index, albert_layer) in enumerate(self.albert_layers): layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions) hidden_states = layer_output[0] if output_attentions: layer_attentions = layer_attentions + (layer_output[1],) if output_hidden_states: 
layer_hidden_states = layer_hidden_states + (hidden_states,) outputs = (hidden_states,) if output_hidden_states: outputs = outputs + (layer_hidden_states,) if output_attentions: outputs = outputs + (layer_attentions,) return outputs class AlbertTransformer(nn.Module): def __init__(self, config: AlbertConfig): super().__init__() self.config = config self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size) self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)]) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[BaseModelOutput, Tuple]: hidden_states = self.embedding_hidden_mapping_in(hidden_states) all_hidden_states = (hidden_states,) if output_hidden_states else None all_attentions = () if output_attentions else None head_mask = [None] * self.config.num_hidden_layers if head_mask is None else head_mask for i in range(self.config.num_hidden_layers): layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups) group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups)) layer_group_output = self.albert_layer_groups[group_idx](hidden_states, attention_mask, head_mask[group_idx * layers_per_group:(group_idx + 1) * layers_per_group], output_attentions, output_hidden_states) hidden_states = layer_group_output[0] if output_attentions: all_attentions = all_attentions + layer_group_output[-1] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) class AlbertPreTrainedModel(PreTrainedModel): config_class = AlbertConfig load_tf_weights = load_tf_weights_in_albert base_model_prefix = 'albert' _supports_sdpa = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class AlbertForPreTrainingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None sop_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ALBERT_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Args:\n config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' ALBERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.', ALBERT_START_DOCSTRING) class AlbertModel(AlbertPreTrainedModel): config_class = AlbertConfig base_model_prefix = 'albert' def __init__(self, config: AlbertConfig, add_pooling_layer: bool=True): super().__init__(config) self.config = config self.embeddings = AlbertEmbeddings(config) self.encoder = AlbertTransformer(config) if add_pooling_layer: self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.pooler_activation = nn.Tanh() else: self.pooler = None self.pooler_activation = None self.attn_implementation = config._attn_implementation self.position_embedding_type = config.position_embedding_type self.post_init() def get_input_embeddings(self) -> nn.Embedding: return self.embeddings.word_embeddings def set_input_embeddings(self, value: nn.Embedding) -> None: self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: for (layer, heads) in heads_to_prune.items(): group_idx = int(layer / self.config.inner_group_num) inner_group_idx = int(layer - group_idx * self.config.inner_group_num) self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[BaseModelOutputWithPooling, Tuple]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) embedding_output = self.embeddings(input_ids, position_ids=position_ids, 
token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) use_sdpa_attention_mask = self.attn_implementation == 'sdpa' and self.position_embedding_type == 'absolute' and (head_mask is None) and (not output_attentions) if use_sdpa_attention_mask: extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, embedding_output.dtype, tgt_len=seq_length) else: extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('\n Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a\n `sentence order prediction (classification)` head.\n ', ALBERT_START_DOCSTRING) class AlbertForPreTraining(AlbertPreTrainedModel): _tied_weights_keys = ['predictions.decoder.bias', 'predictions.decoder.weight'] def __init__(self, config: AlbertConfig): super().__init__(config) self.albert = AlbertModel(config) self.predictions = AlbertMLMHead(config) self.sop_classifier = AlbertSOPHead(config) self.post_init() def get_output_embeddings(self) -> nn.Linear: return self.predictions.decoder def set_output_embeddings(self, new_embeddings: nn.Linear) -> None: self.predictions.decoder = new_embeddings def get_input_embeddings(self) -> nn.Embedding: return self.albert.embeddings.word_embeddings @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, sentence_order_label: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[AlbertForPreTrainingOutput, Tuple]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) (sequence_output, pooled_output) = outputs[:2] prediction_scores = self.predictions(sequence_output) sop_scores = self.sop_classifier(pooled_output) total_loss = None if labels is not None and sentence_order_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, 
self.config.vocab_size), labels.view(-1)) sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1)) total_loss = masked_lm_loss + sentence_order_loss if not return_dict: output = (prediction_scores, sop_scores) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return AlbertForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, sop_logits=sop_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class AlbertMLMHead(nn.Module): def __init__(self, config: AlbertConfig): super().__init__() self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.dense = nn.Linear(config.hidden_size, config.embedding_size) self.decoder = nn.Linear(config.embedding_size, config.vocab_size) self.activation = ACT2FN[config.hidden_act] self.decoder.bias = self.bias def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states) hidden_states = self.decoder(hidden_states) prediction_scores = hidden_states return prediction_scores def _tie_weights(self) -> None: if self.decoder.bias.device.type == 'meta': self.decoder.bias = self.bias else: self.bias = self.decoder.bias class AlbertSOPHead(nn.Module): def __init__(self, config: AlbertConfig): super().__init__() self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward(self, pooled_output: torch.Tensor) -> torch.Tensor: dropout_pooled_output = self.dropout(pooled_output) logits = self.classifier(dropout_pooled_output) return logits @add_start_docstrings('Albert Model with a `language modeling` head on top.', ALBERT_START_DOCSTRING) class AlbertForMaskedLM(AlbertPreTrainedModel): _tied_weights_keys = ['predictions.decoder.bias', 'predictions.decoder.weight'] def __init__(self, config): super().__init__(config) self.albert = AlbertModel(config, add_pooling_layer=False) self.predictions = AlbertMLMHead(config) self.post_init() def get_output_embeddings(self) -> nn.Linear: return self.predictions.decoder def set_output_embeddings(self, new_embeddings: nn.Linear) -> None: self.predictions.decoder = new_embeddings self.predictions.bias = new_embeddings.bias def get_input_embeddings(self) -> nn.Embedding: return self.albert.embeddings.word_embeddings @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MaskedLMOutput, Tuple]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, 
return_dict=return_dict) sequence_outputs = outputs[0] prediction_scores = self.predictions(sequence_outputs) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', ALBERT_START_DOCSTRING) class AlbertForSequenceClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.num_labels = config.num_labels self.config = config self.albert = AlbertModel(config) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='textattack/albert-base-v2-imdb', output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'LABEL_1'", expected_loss=0.12) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[SequenceClassifierOutput, Tuple]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Albert Model with a token classification head on top (a linear layer on top 
of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', ALBERT_START_DOCSTRING) class AlbertForTokenClassification(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.num_labels = config.num_labels self.albert = AlbertModel(config, add_pooling_layer=False) classifier_dropout_prob = config.classifier_dropout_prob if config.classifier_dropout_prob is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[TokenClassifierOutput, Tuple]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', ALBERT_START_DOCSTRING) class AlbertForQuestionAnswering(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.num_labels = config.num_labels self.albert = AlbertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='twmkn9/albert-base-v2-squad2', output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=12, qa_target_end_index=13, expected_output="'a nice puppet'", expected_loss=7.36) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, 
output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[QuestionAnsweringModelOutput, Tuple]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits: torch.Tensor = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', ALBERT_START_DOCSTRING) class AlbertForMultipleChoice(AlbertPreTrainedModel): def __init__(self, config: AlbertConfig): super().__init__(config) self.albert = AlbertModel(config) self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MultipleChoiceModelOutput, Tuple]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.albert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits: torch.Tensor = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) __all__ = ['load_tf_weights_in_albert', 'AlbertPreTrainedModel', 'AlbertModel', 'AlbertForPreTraining', 'AlbertForMaskedLM', 'AlbertForSequenceClassification', 'AlbertForTokenClassification', 'AlbertForQuestionAnswering', 'AlbertForMultipleChoice'] # File: transformers-main/src/transformers/models/albert/modeling_flax_albert.py from typing import Callable, Optional, Tuple import flax import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPooling, FlaxMaskedLMOutput, FlaxMultipleChoiceModelOutput, FlaxQuestionAnsweringModelOutput, FlaxSequenceClassifierOutput, FlaxTokenClassifierOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, 
append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_albert import AlbertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'albert/albert-base-v2' _CONFIG_FOR_DOC = 'AlbertConfig' @flax.struct.dataclass class FlaxAlbertForPreTrainingOutput(ModelOutput): prediction_logits: jnp.ndarray = None sop_logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None ALBERT_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading, saving and converting weights from PyTorch models)\n\n This model is also a\n [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as\n a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and\n behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' ALBERT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`numpy.ndarray` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\n' class FlaxAlbertEmbeddings(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.word_embeddings = nn.Embed(self.config.vocab_size, self.config.embedding_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.position_embeddings = nn.Embed(self.config.max_position_embeddings, self.config.embedding_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.token_type_embeddings = nn.Embed(self.config.type_vocab_size, self.config.embedding_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, deterministic: bool=True): inputs_embeds = self.word_embeddings(input_ids.astype('i4')) position_embeds = self.position_embeddings(position_ids.astype('i4')) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype('i4')) hidden_states = inputs_embeds + token_type_embeddings + position_embeds hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxAlbertSelfAttention(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError(f'`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` : {self.config.num_attention_heads}') self.query = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.key = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.value = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.dense = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, attention_mask, deterministic=True, output_attentions: bool=False): head_dim = self.config.hidden_size // self.config.num_attention_heads query_states = self.query(hidden_states).reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)) value_states = self.value(hidden_states).reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)) key_states = self.key(hidden_states).reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)) if attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), 
jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng('dropout') attn_weights = dot_product_attention_weights(query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) projected_attn_output = self.dense(attn_output) projected_attn_output = self.dropout(projected_attn_output, deterministic=deterministic) layernormed_attn_output = self.LayerNorm(projected_attn_output + hidden_states) outputs = (layernormed_attn_output, attn_weights) if output_attentions else (layernormed_attn_output,) return outputs class FlaxAlbertLayer(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxAlbertSelfAttention(self.config, dtype=self.dtype) self.ffn = nn.Dense(self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] self.ffn_output = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.full_layer_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, attention_mask, deterministic: bool=True, output_attentions: bool=False): attention_outputs = self.attention(hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions) attention_output = attention_outputs[0] ffn_output = self.ffn(attention_output) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(ffn_output) ffn_output = self.dropout(ffn_output, deterministic=deterministic) hidden_states = self.full_layer_layer_norm(ffn_output + attention_output) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) return outputs class FlaxAlbertLayerCollection(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxAlbertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.inner_group_num)] def __call__(self, hidden_states, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False): layer_hidden_states = () layer_attentions = () for (layer_index, albert_layer) in enumerate(self.layers): layer_output = albert_layer(hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions) hidden_states = layer_output[0] if output_attentions: layer_attentions = layer_attentions + (layer_output[1],) if output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) outputs = (hidden_states,) if output_hidden_states: outputs = outputs + (layer_hidden_states,) if output_attentions: outputs = outputs + (layer_attentions,) return outputs class FlaxAlbertLayerCollections(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 layer_index: Optional[str] = None def setup(self): self.albert_layers = FlaxAlbertLayerCollection(self.config, dtype=self.dtype) def __call__(self, hidden_states, attention_mask, deterministic: 
bool=True, output_attentions: bool=False, output_hidden_states: bool=False): outputs = self.albert_layers(hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states) return outputs class FlaxAlbertLayerGroups(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxAlbertLayerCollections(self.config, name=str(i), layer_index=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_groups)] def __call__(self, hidden_states, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = (hidden_states,) if output_hidden_states else None for i in range(self.config.num_hidden_layers): group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups)) layer_group_output = self.layers[group_idx](hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states) hidden_states = layer_group_output[0] if output_attentions: all_attentions = all_attentions + layer_group_output[-1] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) class FlaxAlbertEncoder(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embedding_hidden_mapping_in = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.albert_layer_groups = FlaxAlbertLayerGroups(self.config, dtype=self.dtype) def __call__(self, hidden_states, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): hidden_states = self.embedding_hidden_mapping_in(hidden_states) return self.albert_layer_groups(hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states) class FlaxAlbertOnlyMLMHead(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.dense = nn.Dense(self.config.embedding_size, dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False) self.bias = self.param('bias', self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states) if shared_embedding is not None: hidden_states = self.decoder.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) hidden_states += self.bias return hidden_states class FlaxAlbertSOPHead(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dropout = nn.Dropout(self.config.classifier_dropout_prob) self.classifier = nn.Dense(2, dtype=self.dtype) def __call__(self, pooled_output, deterministic=True): pooled_output = 
self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) return logits class FlaxAlbertPreTrainedModel(FlaxPreTrainedModel): config_class = AlbertConfig base_model_prefix = 'albert' module_class: nn.Module = None def __init__(self, config: AlbertConfig, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') token_type_ids = jnp.zeros_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) attention_mask = jnp.ones_like(input_ids) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, token_type_ids, position_ids, return_dict=False)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng return self.module.apply({'params': params or self.params}, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), jnp.array(token_type_ids, dtype='i4'), jnp.array(position_ids, dtype='i4'), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs) class FlaxAlbertModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 add_pooling_layer: bool = True def setup(self): self.embeddings = FlaxAlbertEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxAlbertEncoder(self.config, dtype=self.dtype) if self.add_pooling_layer: self.pooler = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, name='pooler') self.pooler_activation = nn.tanh else: self.pooler = None self.pooler_activation = None def __call__(self, input_ids, attention_mask, token_type_ids: Optional[np.ndarray]=None, position_ids: Optional[np.ndarray]=None, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): if token_type_ids is None: token_type_ids 
= jnp.zeros_like(input_ids) if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) hidden_states = self.embeddings(input_ids, token_type_ids, position_ids, deterministic=deterministic) outputs = self.encoder(hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.add_pooling_layer: pooled = self.pooler(hidden_states[:, 0]) pooled = self.pooler_activation(pooled) else: pooled = None if not return_dict: if pooled is None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPooling(last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('The bare Albert Model transformer outputting raw hidden-states without any specific head on top.', ALBERT_START_DOCSTRING) class FlaxAlbertModel(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertModule append_call_sample_docstring(FlaxAlbertModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC) class FlaxAlbertForPreTrainingModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype) self.predictions = FlaxAlbertOnlyMLMHead(config=self.config, dtype=self.dtype) self.sop_classifier = FlaxAlbertSOPHead(config=self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.albert(input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if self.config.tie_word_embeddings: shared_embedding = self.albert.variables['params']['embeddings']['word_embeddings']['embedding'] else: shared_embedding = None hidden_states = outputs[0] pooled_output = outputs[1] prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding) sop_scores = self.sop_classifier(pooled_output, deterministic=deterministic) if not return_dict: return (prediction_scores, sop_scores) + outputs[2:] return FlaxAlbertForPreTrainingOutput(prediction_logits=prediction_scores, sop_logits=sop_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a\n `sentence order prediction (classification)` head.\n ', ALBERT_START_DOCSTRING) class FlaxAlbertForPreTraining(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForPreTrainingModule FLAX_ALBERT_FOR_PRETRAINING_DOCSTRING = '\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, FlaxAlbertForPreTraining\n\n >>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")\n >>> model = FlaxAlbertForPreTraining.from_pretrained("albert/albert-base-v2")\n\n >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> seq_relationship_logits = outputs.sop_logits\n ```\n' overwrite_call_docstring(FlaxAlbertForPreTraining, ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length') + 
FLAX_ALBERT_FOR_PRETRAINING_DOCSTRING) append_replace_return_docstrings(FlaxAlbertForPreTraining, output_type=FlaxAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) class FlaxAlbertForMaskedLMModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, add_pooling_layer=False, dtype=self.dtype) self.predictions = FlaxAlbertOnlyMLMHead(config=self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.albert(input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.albert.variables['params']['embeddings']['word_embeddings']['embedding'] else: shared_embedding = None logits = self.predictions(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxMaskedLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('Albert Model with a `language modeling` head on top.', ALBERT_START_DOCSTRING) class FlaxAlbertForMaskedLM(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForMaskedLMModule append_call_sample_docstring(FlaxAlbertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC, revision='refs/pr/11') class FlaxAlbertForSequenceClassificationModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype) classifier_dropout = self.config.classifier_dropout_prob if self.config.classifier_dropout_prob is not None else self.config.hidden_dropout_prob self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.albert(input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) if not return_dict: return (logits,) + outputs[2:] return FlaxSequenceClassifierOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. 
for GLUE tasks.\n ', ALBERT_START_DOCSTRING) class FlaxAlbertForSequenceClassification(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForSequenceClassificationModule append_call_sample_docstring(FlaxAlbertForSequenceClassification, _CHECKPOINT_FOR_DOC, FlaxSequenceClassifierOutput, _CONFIG_FOR_DOC) class FlaxAlbertForMultipleChoiceModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None outputs = self.albert(input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput(logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', ALBERT_START_DOCSTRING) class FlaxAlbertForMultipleChoice(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForMultipleChoiceModule overwrite_call_docstring(FlaxAlbertForMultipleChoice, ALBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) append_call_sample_docstring(FlaxAlbertForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC) class FlaxAlbertForTokenClassificationModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False) classifier_dropout = self.config.classifier_dropout_prob if self.config.classifier_dropout_prob is not None else self.config.hidden_dropout_prob self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.albert(input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', ALBERT_START_DOCSTRING) class FlaxAlbertForTokenClassification(FlaxAlbertPreTrainedModel): module_class = FlaxAlbertForTokenClassificationModule append_call_sample_docstring(FlaxAlbertForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC) class FlaxAlbertForQuestionAnsweringModule(nn.Module): config: AlbertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.albert = FlaxAlbertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.albert(input_ids, attention_mask, token_type_ids, position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] logits = self.qa_outputs(hidden_states) (start_logits, end_logits) = logits.split(self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput(start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', ALBERT_START_DOCSTRING) class FlaxAlbertForQuestionAnswering(FlaxAlbertPreTrainedModel): 
module_class = FlaxAlbertForQuestionAnsweringModule append_call_sample_docstring(FlaxAlbertForQuestionAnswering, _CHECKPOINT_FOR_DOC, FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC) __all__ = ['FlaxAlbertPreTrainedModel', 'FlaxAlbertModel', 'FlaxAlbertForPreTraining', 'FlaxAlbertForMaskedLM', 'FlaxAlbertForSequenceClassification', 'FlaxAlbertForMultipleChoice', 'FlaxAlbertForTokenClassification', 'FlaxAlbertForQuestionAnswering'] # File: transformers-main/src/transformers/models/albert/modeling_tf_albert.py """""" from __future__ import annotations import math from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput from ...modeling_tf_utils import TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_albert import AlbertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'albert/albert-base-v2' _CONFIG_FOR_DOC = 'AlbertConfig' class TFAlbertPreTrainingLoss: def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=keras.losses.Reduction.NONE) if self.config.tf_legacy_loss: masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels['labels'], shape=(-1,)), -100) masked_lm_reduced_logits = tf.boolean_mask(tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])), mask=masked_lm_active_loss) masked_lm_labels = tf.boolean_mask(tensor=tf.reshape(tensor=labels['labels'], shape=(-1,)), mask=masked_lm_active_loss) sentence_order_active_loss = tf.not_equal(tf.reshape(tensor=labels['sentence_order_label'], shape=(-1,)), -100) sentence_order_reduced_logits = tf.boolean_mask(tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=sentence_order_active_loss) sentence_order_label = tf.boolean_mask(tensor=tf.reshape(tensor=labels['sentence_order_label'], shape=(-1,)), mask=sentence_order_active_loss) masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits) sentence_order_loss = loss_fn(y_true=sentence_order_label, y_pred=sentence_order_reduced_logits) masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(sentence_order_loss)[0])) masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0) return masked_lm_loss + sentence_order_loss unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels['labels']), y_pred=logits[0]) lm_loss_mask = tf.cast(labels['labels'] != -100, dtype=unmasked_lm_losses.dtype) masked_lm_losses = unmasked_lm_losses * lm_loss_mask reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask) sop_logits = tf.reshape(logits[1], (-1, 2)) unmasked_sop_loss = loss_fn(y_true=tf.nn.relu(labels['sentence_order_label']), y_pred=sop_logits) sop_loss_mask = tf.cast(labels['sentence_order_label'] != -100, 
dtype=unmasked_sop_loss.dtype) masked_sop_loss = unmasked_sop_loss * sop_loss_mask reduced_masked_sop_loss = tf.reduce_sum(masked_sop_loss) / tf.reduce_sum(sop_loss_mask) return tf.reshape(reduced_masked_lm_loss + reduced_masked_sop_loss, (1,)) class TFAlbertEmbeddings(keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope('word_embeddings'): self.weight = self.add_weight(name='weight', shape=[self.config.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range)) with tf.name_scope('token_type_embeddings'): self.token_type_embeddings = self.add_weight(name='embeddings', shape=[self.config.type_vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range)) with tf.name_scope('position_embeddings'): self.position_embeddings = self.add_weight(name='embeddings', shape=[self.max_position_embeddings, self.embedding_size], initializer=get_initializer(self.initializer_range)) if self.built: return self.built = True if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.embedding_size]) def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, token_type_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None, past_key_values_length=0, training: bool=False) -> tf.Tensor: if input_ids is None and inputs_embeds is None: raise ValueError('Need to provide either `input_ids` or `input_embeds`.') if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFAlbertAttention(keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.output_attentions = config.output_attentions self.query = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='query') self.key = keras.layers.Dense(units=self.all_head_size, 
kernel_initializer=get_initializer(config.initializer_range), name='key') self.value = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='value') self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.attention_dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.output_dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call(self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: batch_size = shape_list(input_tensor)[0] mixed_query_layer = self.query(inputs=input_tensor) mixed_key_layer = self.key(inputs=input_tensor) mixed_value_layer = self.value(inputs=input_tensor) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: attention_scores = tf.add(attention_scores, attention_mask) attention_probs = stable_softmax(logits=attention_scores, axis=-1) attention_probs = self.attention_dropout(inputs=attention_probs, training=training) if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape(tensor=context_layer, shape=(batch_size, -1, self.all_head_size)) self_outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) hidden_states = self_outputs[0] hidden_states = self.dense(inputs=hidden_states) hidden_states = self.output_dropout(inputs=hidden_states, training=training) attention_output = self.LayerNorm(inputs=hidden_states + input_tensor) outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'query', None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, 'key', None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, 'value', None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFAlbertLayer(keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.attention = TFAlbertAttention(config, name='attention') self.ffn = keras.layers.Dense(units=config.intermediate_size, 
kernel_initializer=get_initializer(config.initializer_range), name='ffn') if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.ffn_output = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='ffn_output') self.full_layer_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='full_layer_layer_norm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: attention_outputs = self.attention(input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, training=training) ffn_output = self.ffn(inputs=attention_outputs[0]) ffn_output = self.activation(ffn_output) ffn_output = self.ffn_output(inputs=ffn_output) ffn_output = self.dropout(inputs=ffn_output, training=training) hidden_states = self.full_layer_layer_norm(inputs=ffn_output + attention_outputs[0]) outputs = (hidden_states,) + attention_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'attention', None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, 'ffn', None) is not None: with tf.name_scope(self.ffn.name): self.ffn.build([None, None, self.config.hidden_size]) if getattr(self, 'ffn_output', None) is not None: with tf.name_scope(self.ffn_output.name): self.ffn_output.build([None, None, self.config.intermediate_size]) if getattr(self, 'full_layer_layer_norm', None) is not None: with tf.name_scope(self.full_layer_layer_norm.name): self.full_layer_layer_norm.build([None, None, self.config.hidden_size]) class TFAlbertLayerGroup(keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.albert_layers = [TFAlbertLayer(config, name=f'albert_layers_._{i}') for i in range(config.inner_group_num)] def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: layer_hidden_states = () if output_hidden_states else None layer_attentions = () if output_attentions else None for (layer_index, albert_layer) in enumerate(self.albert_layers): if output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) layer_output = albert_layer(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[layer_index], output_attentions=output_attentions, training=training) hidden_states = layer_output[0] if output_attentions: layer_attentions = layer_attentions + (layer_output[1],) if output_hidden_states: layer_hidden_states = layer_hidden_states + (hidden_states,) return tuple((v for v in [hidden_states, layer_hidden_states, layer_attentions] if v is not None)) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'albert_layers', None) is not None: for layer in self.albert_layers: with tf.name_scope(layer.name): layer.build(None) class TFAlbertTransformer(keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.num_hidden_layers = config.num_hidden_layers self.num_hidden_groups = config.num_hidden_groups 
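# --- Illustrative sketch (not part of the library source) ------------------
# ALBERT shares parameters across layers: `num_hidden_layers` forward passes are
# routed through only `num_hidden_groups` groups of shared `TFAlbertLayer`s. The
# helper below mirrors the index arithmetic used in `TFAlbertTransformer.call`
# just below, so the sharing pattern and head-mask slicing are easy to inspect.
# The config values in the usage comment are illustrative (ALBERT-base uses 12
# layers and a single group).
def _albert_layer_to_group(num_hidden_layers: int, num_hidden_groups: int):
    layers_per_group = num_hidden_layers // num_hidden_groups
    mapping = []
    for i in range(num_hidden_layers):
        group_idx = int(i / (num_hidden_layers / num_hidden_groups))
        head_mask_slice = (group_idx * layers_per_group, (group_idx + 1) * layers_per_group)
        mapping.append((i, group_idx, head_mask_slice))
    return mapping
# Example: _albert_layer_to_group(12, 1) maps every layer index to group 0,
# while _albert_layer_to_group(12, 4) maps layers 0-2 to group 0, 3-5 to group 1, and so on.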
self.layers_per_group = int(config.num_hidden_layers / config.num_hidden_groups) self.embedding_hidden_mapping_in = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='embedding_hidden_mapping_in') self.albert_layer_groups = [TFAlbertLayerGroup(config, name=f'albert_layer_groups_._{i}') for i in range(config.num_hidden_groups)] self.config = config def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: hidden_states = self.embedding_hidden_mapping_in(inputs=hidden_states) all_attentions = () if output_attentions else None all_hidden_states = (hidden_states,) if output_hidden_states else None for i in range(self.num_hidden_layers): group_idx = int(i / (self.num_hidden_layers / self.num_hidden_groups)) layer_group_output = self.albert_layer_groups[group_idx](hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[group_idx * self.layers_per_group:(group_idx + 1) * self.layers_per_group], output_attentions=output_attentions, output_hidden_states=output_hidden_states, training=training) hidden_states = layer_group_output[0] if output_attentions: all_attentions = all_attentions + layer_group_output[-1] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embedding_hidden_mapping_in', None) is not None: with tf.name_scope(self.embedding_hidden_mapping_in.name): self.embedding_hidden_mapping_in.build([None, None, self.config.embedding_size]) if getattr(self, 'albert_layer_groups', None) is not None: for layer in self.albert_layer_groups: with tf.name_scope(layer.name): layer.build(None) class TFAlbertPreTrainedModel(TFPreTrainedModel): config_class = AlbertConfig base_model_prefix = 'albert' class TFAlbertMLMHead(keras.layers.Layer): def __init__(self, config: AlbertConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.dense = keras.layers.Dense(config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.decoder = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer='zeros', trainable=True, name='bias') self.decoder_bias = self.add_weight(shape=(self.config.vocab_size,), initializer='zeros', trainable=True, name='decoder/bias') if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.embedding_size]) def get_output_embeddings(self) -> keras.layers.Layer: return self.decoder def 
set_output_embeddings(self, value: tf.Variable): self.decoder.weight = value self.decoder.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {'bias': self.bias, 'decoder_bias': self.decoder_bias} def set_bias(self, value: tf.Variable): self.bias = value['bias'] self.decoder_bias = value['decoder_bias'] self.config.vocab_size = shape_list(value['bias'])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size]) hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.decoder_bias) return hidden_states @keras_serializable class TFAlbertMainLayer(keras.layers.Layer): config_class = AlbertConfig def __init__(self, config: AlbertConfig, add_pooling_layer: bool=True, **kwargs): super().__init__(**kwargs) self.config = config self.embeddings = TFAlbertEmbeddings(config, name='embeddings') self.encoder = TFAlbertTransformer(config, name='encoder') self.pooler = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation='tanh', name='pooler') if add_pooling_layer else None def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, training=training) extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers 
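# --- Illustrative sketch (not part of the library source) ------------------
# `TFAlbertMainLayer.call` above turns a 0/1 padding mask of shape
# (batch_size, seq_len) into an additive bias of shape (batch_size, 1, 1, seq_len):
# masked positions receive -10000.0, which drives their softmax weight to ~0 once
# the bias is added to the raw attention scores in `TFAlbertAttention`. A minimal
# standalone version (assumes a statically shaped mask; `tf` is TensorFlow, already
# imported at the top of this file):
def _make_extended_attention_mask(attention_mask, dtype=tf.float32):
    batch_size, seq_len = attention_mask.shape
    extended = tf.reshape(attention_mask, (batch_size, 1, 1, seq_len))
    extended = tf.cast(extended, dtype=dtype)
    return (1.0 - extended) * -10000.0
# Example:
#   mask = tf.constant([[1, 1, 1, 0]])             # last token is padding
#   bias = _make_extended_attention_mask(mask)     # shape (1, 1, 1, 4)
#   tf.nn.softmax(tf.zeros((1, 1, 1, 4)) + bias)   # padded position gets ~0 attention weight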
encoder_outputs = self.encoder(hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = encoder_outputs[0] pooled_output = self.pooler(inputs=sequence_output[:, 0]) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'pooler', None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build([None, None, self.config.hidden_size]) @dataclass class TFAlbertForPreTrainingOutput(ModelOutput): loss: tf.Tensor = None prediction_logits: tf.Tensor = None sop_logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None ALBERT_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! 
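# --- Illustrative usage sketch (not part of the library source) ------------
# The surrounding docstring describes the three input formats every TF model in
# the library accepts: keyword arguments, a single dict, or a positional list.
# A minimal demonstration with ALBERT, assuming the `albert/albert-base-v2`
# checkpoint used for the code samples in this file is downloadable; the helper
# name is hypothetical:
def _demo_tf_input_formats():
    from transformers import AutoTokenizer, TFAlbertModel

    tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
    model = TFAlbertModel.from_pretrained("albert/albert-base-v2")
    enc = tokenizer("ALBERT shares parameters across layers.", return_tensors="tf")

    out_kwargs = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])
    out_dict = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})
    out_list = model([enc["input_ids"], enc["attention_mask"]])  # order as documented above
    return out_kwargs, out_dict, out_list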
If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Args:\n config ([`AlbertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' ALBERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @add_start_docstrings('The bare Albert Model transformer outputting raw hidden-states without any specific head on top.', ALBERT_START_DOCSTRING) class TFAlbertModel(TFAlbertPreTrainedModel): def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, name='albert') @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'albert', None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) @add_start_docstrings('\n Albert Model with two heads on top for pretraining: a `masked language modeling` head and a `sentence order\n prediction` (classification) head.\n ', ALBERT_START_DOCSTRING) class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss): _keys_to_ignore_on_load_unexpected = ['predictions.decoder.weight'] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name='albert') self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name='predictions') self.sop_classifier = TFAlbertSOPHead(config, name='sop_classifier') def get_lm_head(self) -> keras.layers.Layer: return self.predictions @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TFAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | 
tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, sentence_order_label: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFAlbertForPreTrainingOutput, Tuple[tf.Tensor]]: outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) (sequence_output, pooled_output) = outputs[:2] prediction_scores = self.predictions(hidden_states=sequence_output) sop_scores = self.sop_classifier(pooled_output=pooled_output, training=training) total_loss = None if labels is not None and sentence_order_label is not None: d_labels = {'labels': labels} d_labels['sentence_order_label'] = sentence_order_label total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, sop_scores)) if not return_dict: output = (prediction_scores, sop_scores) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return TFAlbertForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, sop_logits=sop_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'albert', None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, 'predictions', None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) if getattr(self, 'sop_classifier', None) is not None: with tf.name_scope(self.sop_classifier.name): self.sop_classifier.build(None) class TFAlbertSOPHead(keras.layers.Layer): def __init__(self, config: AlbertConfig, **kwargs): super().__init__(**kwargs) self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob) self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config def call(self, pooled_output: tf.Tensor, training: bool) -> tf.Tensor: dropout_pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=dropout_pooled_output) return logits def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('Albert Model with a `language modeling` head on top.', ALBERT_START_DOCSTRING) class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'predictions.decoder.weight'] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name='albert') self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name='predictions') def get_lm_head(self) -> keras.layers.Layer: return self.predictions @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) 
@replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] prediction_scores = self.predictions(hidden_states=sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return (loss,) + output if loss is not None else output return TFMaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'albert', None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, 'predictions', None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) @add_start_docstrings('\n Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. 
for GLUE tasks.\n ', ALBERT_START_DOCSTRING) class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss): _keys_to_ignore_on_load_unexpected = ['predictions'] _keys_to_ignore_on_load_missing = ['dropout'] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, name='albert') self.dropout = keras.layers.Dropout(rate=config.classifier_dropout_prob) self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='vumichien/albert-base-v2-imdb', output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'LABEL_1'", expected_loss=0.12) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'albert', None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', ALBERT_START_DOCSTRING) class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'predictions'] _keys_to_ignore_on_load_missing = ['dropout'] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name='albert') classifier_dropout_prob = config.classifier_dropout_prob if config.classifier_dropout_prob is not None else config.hidden_dropout_prob self.dropout = keras.layers.Dropout(rate=classifier_dropout_prob) self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] sequence_output = self.dropout(inputs=sequence_output, training=training) logits = self.classifier(inputs=sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'albert', None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', ALBERT_START_DOCSTRING) class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'predictions'] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name='albert') self.qa_outputs = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs') 
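# --- Illustrative usage sketch (not part of the library source) ------------
# `TFAlbertForTokenClassification` above returns one logit vector per token, so
# NER predictions are just the argmax over the label dimension. The checkpoint
# name below is hypothetical (any ALBERT token-classification fine-tune would do):
def _demo_token_classification():
    import tensorflow as tf
    from transformers import AutoTokenizer, TFAlbertForTokenClassification

    name = "some-user/albert-base-v2-ner"  # hypothetical fine-tuned checkpoint
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = TFAlbertForTokenClassification.from_pretrained(name)

    enc = tokenizer("Albert lives in Paris", return_tensors="tf")
    logits = model(**enc).logits                      # shape (1, seq_len, num_labels)
    pred_ids = tf.argmax(logits, axis=-1)[0].numpy()  # best label id per token
    tokens = tokenizer.convert_ids_to_tokens(enc["input_ids"][0].numpy().tolist())
    labels = [model.config.id2label[int(i)] for i in pred_ids]
    return list(zip(tokens, labels))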
self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='vumichien/albert-base-v2-squad2', output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=12, qa_target_end_index=13, expected_output="'a nice puppet'", expected_loss=7.36) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, start_positions: np.ndarray | tf.Tensor | None=None, end_positions: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] logits = self.qa_outputs(inputs=sequence_output) (start_logits, end_logits) = tf.split(value=logits, num_or_size_splits=2, axis=-1) start_logits = tf.squeeze(input=start_logits, axis=-1) end_logits = tf.squeeze(input=end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {'start_position': start_positions} labels['end_position'] = end_positions loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (loss,) + output if loss is not None else output return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'albert', None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, 'qa_outputs', None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', ALBERT_START_DOCSTRING) class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'predictions'] _keys_to_ignore_on_load_missing = ['dropout'] def __init__(self, config: AlbertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.albert = TFAlbertMainLayer(config, name='albert') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.classifier = keras.layers.Dense(units=1, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None flat_inputs_embeds = tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None outputs = self.albert(input_ids=flat_input_ids, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'albert', None) is not None: with tf.name_scope(self.albert.name): self.albert.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) __all__ = ['TFAlbertPreTrainedModel', 'TFAlbertModel', 'TFAlbertForPreTraining', 
'TFAlbertForMaskedLM', 'TFAlbertForSequenceClassification', 'TFAlbertForTokenClassification', 'TFAlbertForQuestionAnswering', 'TFAlbertForMultipleChoice', 'TFAlbertMainLayer'] # File: transformers-main/src/transformers/models/albert/tokenization_albert.py """""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging from ...utils.import_utils import export logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'} SPIECE_UNDERLINE = '▁' @export(backends=('sentencepiece',)) class AlbertTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None: mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs) @property def vocab_size(self) -> int: return len(self.sp_model) def get_vocab(self) -> Dict[str, int]: vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None return state def __setstate__(self, d): self.__dict__ = d if not hasattr(self, 'sp_model_kwargs'): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def preprocess_text(self, inputs): if self.remove_space: outputs = ' '.join(inputs.strip().split()) else: outputs = inputs outputs = outputs.replace('``', '"').replace("''", '"') if not self.keep_accents: outputs = unicodedata.normalize('NFKD', outputs) outputs = ''.join([c for c in outputs if not unicodedata.combining(c)]) if self.do_lower_case: outputs = outputs.lower() return outputs def _tokenize(self, text: str) -> List[str]: text = self.preprocess_text(text) pieces = self.sp_model.encode(text, out_type=str) new_pieces = [] for piece in pieces: if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit(): cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, '')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: cur_pieces = cur_pieces[1:] else: cur_pieces[0] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(cur_pieces) else: new_pieces.append(piece) return new_pieces def _convert_token_to_id(self, token): return self.sp_model.PieceToId(token) def _convert_id_to_token(self, index): return self.sp_model.IdToPiece(index) def convert_tokens_to_string(self, tokens): current_sub_tokens = [] out_string = '' prev_is_special
= False for token in tokens: if token in self.all_special_tokens: if not prev_is_special: out_string += ' ' out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) __all__ = ['AlbertTokenizer'] # File: transformers-main/src/transformers/models/albert/tokenization_albert_fast.py """""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: AlbertTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} SPIECE_UNDERLINE = '▁' class AlbertTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = AlbertTokenizer def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', **kwargs): mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
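# --- Illustrative sketch (not part of the library source) ------------------
# Both tokenizers in this pair of files build sequence pairs as
# `[CLS] A [SEP] B [SEP]`, with token_type_ids 0 for the first segment (including
# [CLS] and its trailing [SEP]) and 1 for the second, as implemented in
# `build_inputs_with_special_tokens` / `create_token_type_ids_from_sequences`
# above. A standalone sketch of the same layout with plain lists; the id values
# 2 and 3 are only illustrative stand-ins for [CLS] and [SEP]:
def _albert_pair_layout(ids_a, ids_b, cls_id=2, sep_id=3):
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    special_tokens_mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]
    return input_ids, token_type_ids, special_tokens_mask
# Example: _albert_pair_layout([10, 11], [20, 21, 22]) returns
#   ([2, 10, 11, 3, 20, 21, 22, 3], [0, 0, 0, 0, 1, 1, 1, 1], [1, 0, 0, 1, 0, 0, 0, 1])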
self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.') if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) __all__ = ['AlbertTokenizerFast'] # File: transformers-main/src/transformers/models/align/__init__.py from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_align import * from .modeling_align import * from .processing_align import * else: import sys _file = globals()['__file__'] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) # File: transformers-main/src/transformers/models/align/configuration_align.py """""" import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class AlignTextConfig(PretrainedConfig): model_type = 'align_text_model' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.pad_token_id = pad_token_id @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) 
= cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'align': config_dict = config_dict['text_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class AlignVisionConfig(PretrainedConfig): model_type = 'align_vision_model' def __init__(self, num_channels: int=3, image_size: int=600, width_coefficient: float=2.0, depth_coefficient: float=3.1, depth_divisor: int=8, kernel_sizes: List[int]=[3, 3, 5, 3, 5, 5, 3], in_channels: List[int]=[32, 16, 24, 40, 80, 112, 192], out_channels: List[int]=[16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int]=[], strides: List[int]=[1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int]=[1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int]=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float=0.25, hidden_act: str='swish', hidden_dim: int=2560, pooling_type: str='mean', initializer_range: float=0.02, batch_norm_eps: float=0.001, batch_norm_momentum: float=0.99, drop_connect_rate: float=0.2, **kwargs): super().__init__(**kwargs) self.num_channels = num_channels self.image_size = image_size self.width_coefficient = width_coefficient self.depth_coefficient = depth_coefficient self.depth_divisor = depth_divisor self.kernel_sizes = kernel_sizes self.in_channels = in_channels self.out_channels = out_channels self.depthwise_padding = depthwise_padding self.strides = strides self.num_block_repeats = num_block_repeats self.expand_ratios = expand_ratios self.squeeze_expansion_ratio = squeeze_expansion_ratio self.hidden_act = hidden_act self.hidden_dim = hidden_dim self.pooling_type = pooling_type self.initializer_range = initializer_range self.batch_norm_eps = batch_norm_eps self.batch_norm_momentum = batch_norm_momentum self.drop_connect_rate = drop_connect_rate self.num_hidden_layers = sum(num_block_repeats) * 4 @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'align': config_dict = config_dict['vision_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class AlignConfig(PretrainedConfig): model_type = 'align' def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info('text_config is None. Initializing the AlignTextConfig with default values.') if vision_config is None: vision_config = {} logger.info('vision_config is None. 
Initializing the AlignVisionConfig with default values.') self.text_config = AlignTextConfig(**text_config) self.vision_config = AlignVisionConfig(**vision_config) self.projection_dim = projection_dim self.temperature_init_value = temperature_init_value self.initializer_range = initializer_range @classmethod def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs): return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) __all__ = ['AlignTextConfig', 'AlignVisionConfig', 'AlignConfig'] # File: transformers-main/src/transformers/models/align/convert_align_tf_to_hf.py """""" import argparse import os import align import numpy as np import requests import tensorflow as tf import torch from PIL import Image from tokenizer import Tokenizer from transformers import AlignConfig, AlignModel, AlignProcessor, BertConfig, BertTokenizer, EfficientNetConfig, EfficientNetImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def preprocess(image): image = tf.image.resize(image, (346, 346)) image = tf.image.crop_to_bounding_box(image, (346 - 289) // 2, (346 - 289) // 2, 289, 289) return image def get_align_config(): vision_config = EfficientNetConfig.from_pretrained('google/efficientnet-b7') vision_config.image_size = 289 vision_config.hidden_dim = 640 vision_config.id2label = {'0': 'LABEL_0', '1': 'LABEL_1'} vision_config.label2id = {'LABEL_0': 0, 'LABEL_1': 1} vision_config.depthwise_padding = [] text_config = BertConfig() config = AlignConfig.from_text_vision_configs(text_config=text_config, vision_config=vision_config, projection_dim=640) return config def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im def get_processor(): image_processor = EfficientNetImageProcessor(do_center_crop=True, rescale_factor=1 / 127.5, rescale_offset=True, do_normalize=False, include_top=False, resample=Image.BILINEAR) tokenizer = BertTokenizer.from_pretrained('google-bert/bert-base-uncased') tokenizer.model_max_length = 64 processor = AlignProcessor(image_processor=image_processor, tokenizer=tokenizer) return processor def rename_keys(original_param_names): block_names = [v.split('_')[0].split('block')[1] for v in original_param_names if v.startswith('block')] block_names = list(set(block_names)) block_names = sorted(block_names) num_blocks = len(block_names) block_name_mapping = {b: str(i) for (b, i) in zip(block_names, range(num_blocks))} rename_keys = [] rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight')) rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight')) rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias')) rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean')) rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var')) for b in block_names: hf_b = block_name_mapping[b] rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight')) rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight')) rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias')) rename_keys.append((f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean')) rename_keys.append((f'block{b}_expand_bn/moving_variance:0', 
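# --- Editor's note: illustrative sketch, not part of the original source. ---
# The three configuration classes above compose as follows: AlignConfig holds an
# AlignTextConfig (BERT-style text tower) and an AlignVisionConfig (EfficientNet-style
# vision tower) plus the shared projection and temperature settings, and
# `AlignConfig.from_text_vision_configs` stitches the two sub-configs together.
# A minimal sketch using library defaults (image_size=289 mirrors the conversion script):
from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_config = AlignTextConfig()                    # BERT-base-like defaults (hidden_size=768)
vision_config = AlignVisionConfig(image_size=289)  # EfficientNet-B7-like defaults (hidden_dim=2560)
config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
print(config.text_config.hidden_size, config.vision_config.hidden_dim)  # 768 2560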
f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var')) rename_keys.append((f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight')) rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight')) rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias')) rename_keys.append((f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean')) rename_keys.append((f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var')) rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight')) rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias')) rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight')) rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias')) rename_keys.append((f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight')) rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight')) rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias')) rename_keys.append((f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean')) rename_keys.append((f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var')) key_mapping = {} for item in rename_keys: if item[0] in original_param_names: key_mapping[item[0]] = 'vision_model.' + item[1] rename_keys = [] old = 'tf_bert_model/bert' new = 'text_model' for i in range(12): rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/query/kernel:0', f'{new}.encoder.layer.{i}.attention.self.query.weight')) rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/query/bias:0', f'{new}.encoder.layer.{i}.attention.self.query.bias')) rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/key/kernel:0', f'{new}.encoder.layer.{i}.attention.self.key.weight')) rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/key/bias:0', f'{new}.encoder.layer.{i}.attention.self.key.bias')) rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/value/kernel:0', f'{new}.encoder.layer.{i}.attention.self.value.weight')) rename_keys.append((f'{old}/encoder/layer_._{i}/attention/self/value/bias:0', f'{new}.encoder.layer.{i}.attention.self.value.bias')) rename_keys.append((f'{old}/encoder/layer_._{i}/attention/output/dense/kernel:0', f'{new}.encoder.layer.{i}.attention.output.dense.weight')) rename_keys.append((f'{old}/encoder/layer_._{i}/attention/output/dense/bias:0', f'{new}.encoder.layer.{i}.attention.output.dense.bias')) rename_keys.append((f'{old}/encoder/layer_._{i}/attention/output/LayerNorm/gamma:0', f'{new}.encoder.layer.{i}.attention.output.LayerNorm.weight')) rename_keys.append((f'{old}/encoder/layer_._{i}/attention/output/LayerNorm/beta:0', f'{new}.encoder.layer.{i}.attention.output.LayerNorm.bias')) rename_keys.append((f'{old}/encoder/layer_._{i}/intermediate/dense/kernel:0', f'{new}.encoder.layer.{i}.intermediate.dense.weight')) rename_keys.append((f'{old}/encoder/layer_._{i}/intermediate/dense/bias:0', f'{new}.encoder.layer.{i}.intermediate.dense.bias')) rename_keys.append((f'{old}/encoder/layer_._{i}/output/dense/kernel:0', 
f'{new}.encoder.layer.{i}.output.dense.weight')) rename_keys.append((f'{old}/encoder/layer_._{i}/output/dense/bias:0', f'{new}.encoder.layer.{i}.output.dense.bias')) rename_keys.append((f'{old}/encoder/layer_._{i}/output/LayerNorm/gamma:0', f'{new}.encoder.layer.{i}.output.LayerNorm.weight')) rename_keys.append((f'{old}/encoder/layer_._{i}/output/LayerNorm/beta:0', f'{new}.encoder.layer.{i}.output.LayerNorm.bias')) rename_keys.append((f'{old}/embeddings/word_embeddings/weight:0', f'{new}.embeddings.word_embeddings.weight')) rename_keys.append((f'{old}/embeddings/position_embeddings/embeddings:0', f'{new}.embeddings.position_embeddings.weight')) rename_keys.append((f'{old}/embeddings/token_type_embeddings/embeddings:0', f'{new}.embeddings.token_type_embeddings.weight')) rename_keys.append((f'{old}/embeddings/LayerNorm/gamma:0', f'{new}.embeddings.LayerNorm.weight')) rename_keys.append((f'{old}/embeddings/LayerNorm/beta:0', f'{new}.embeddings.LayerNorm.bias')) rename_keys.append((f'{old}/pooler/dense/kernel:0', f'{new}.pooler.dense.weight')) rename_keys.append((f'{old}/pooler/dense/bias:0', f'{new}.pooler.dense.bias')) rename_keys.append(('dense/kernel:0', 'text_projection.weight')) rename_keys.append(('dense/bias:0', 'text_projection.bias')) rename_keys.append(('temperature:0', 'temperature')) for item in rename_keys: if item[0] in original_param_names: key_mapping[item[0]] = item[1] return key_mapping def replace_params(hf_params, tf_params, key_mapping): list(hf_params.keys()) for (key, value) in tf_params.items(): if key not in key_mapping: continue hf_key = key_mapping[key] if '_conv' in key and 'kernel' in key: new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1) elif 'embeddings' in key: new_hf_value = torch.from_numpy(value) elif 'depthwise_kernel' in key: new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1) elif 'kernel' in key: new_hf_value = torch.from_numpy(np.transpose(value)) elif 'temperature' in key: new_hf_value = value elif 'bn/gamma' in key or 'bn/beta' in key: new_hf_value = torch.from_numpy(np.transpose(value)).squeeze() else: new_hf_value = torch.from_numpy(value) hf_params[hf_key].copy_(new_hf_value) @torch.no_grad() def convert_align_checkpoint(checkpoint_path, pytorch_dump_folder_path, save_model, push_to_hub): seq_length = 64 tok = Tokenizer(seq_length) original_model = align.Align('efficientnet-b7', 'bert-base', 640, seq_length, tok.get_vocab_size()) original_model.compile() original_model.load_weights(checkpoint_path) tf_params = original_model.trainable_variables tf_non_train_params = original_model.non_trainable_variables tf_params = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: tf_params[param.name] = param.numpy() tf_param_names = list(tf_params.keys()) config = get_align_config() hf_model = AlignModel(config).eval() hf_params = hf_model.state_dict() print('Converting parameters...') key_mapping = rename_keys(tf_param_names) replace_params(hf_params, tf_params, key_mapping) processor = get_processor() inputs = processor(images=prepare_img(), text='A picture of a cat', padding='max_length', max_length=64, return_tensors='pt') hf_model.eval() with torch.no_grad(): outputs = hf_model(**inputs) hf_image_features = outputs.image_embeds.detach().numpy() hf_text_features = outputs.text_embeds.detach().numpy() original_model.trainable = False tf_image_processor = EfficientNetImageProcessor(do_center_crop=True, do_rescale=False, do_normalize=False,
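# --- Editor's note: illustrative sketch, not part of the original source. ---
# `replace_params` above re-orders TensorFlow weight layouts into their PyTorch
# equivalents: TF Conv2D kernels are stored as (H, W, in, out) while torch.nn.Conv2d
# expects (out, in, H, W), hence `.permute(3, 2, 0, 1)`; TF Dense kernels are (in, out)
# while torch.nn.Linear stores (out, in), hence `np.transpose`. A toy shape check:
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)       # TF layout: (H, W, in, out)
pt_weight = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # -> (out, in, H, W)
assert pt_weight.shape == torch.nn.Conv2d(16, 32, kernel_size=3).weight.shape

tf_dense = np.zeros((768, 640), dtype=np.float32)            # TF layout: (in, out)
assert torch.from_numpy(np.transpose(tf_dense)).shape == torch.nn.Linear(768, 640).weight.shape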
include_top=False, resample=Image.BILINEAR) image = tf_image_processor(images=prepare_img(), return_tensors='tf', data_format='channels_last')['pixel_values'] text = tok(tf.constant(['A picture of a cat'])) image_features = original_model.image_encoder(image, training=False) text_features = original_model.text_encoder(text, training=False) image_features = tf.nn.l2_normalize(image_features, axis=-1) text_features = tf.nn.l2_normalize(text_features, axis=-1) if not np.allclose(image_features, hf_image_features, atol=0.001): raise ValueError('The predicted image features are not the same.') if not np.allclose(text_features, hf_text_features, atol=0.001): raise ValueError('The predicted text features are not the same.') print('Model outputs match!') if save_model: if not os.path.isdir(pytorch_dump_folder_path): os.mkdir(pytorch_dump_folder_path) hf_model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print('Pushing converted ALIGN to the hub...') processor.push_to_hub('align-base') hf_model.push_to_hub('align-base') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', default='./weights/model-weights', type=str, help='Path to the pretrained TF ALIGN checkpoint.') parser.add_argument('--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') args = parser.parse_args() convert_align_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub) # File: transformers-main/src/transformers/models/align/modeling_align.py """""" import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, BaseModelOutputWithPoolingAndNoAttention from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'kakaobrain/align-base' _CONFIG_FOR_DOC = 'AlignConfig' ALIGN_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`AlignConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' ALIGN_TEXT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" ALIGN_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' ALIGN_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @dataclass class AlignVisionModelOutput(ModelOutput): image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class AlignTextModelOutput(ModelOutput): text_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class AlignOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None vision_model_output: BaseModelOutputWithPoolingAndNoAttention = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device), label_smoothing=0.1) def align_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 def round_filters(config: AlignVisionConfig, num_channels: int): divisor = config.depth_divisor num_channels *= config.width_coefficient new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor) if new_dim < 0.9 * num_channels: new_dim += divisor return int(new_dim) def correct_pad(kernel_size: Union[int, Tuple], adjust: bool=True): if isinstance(kernel_size, int): kernel_size = (kernel_size, kernel_size) correct = (kernel_size[0] // 2, kernel_size[1] // 2) if adjust: return (correct[1] - 1, correct[1], correct[0] - 1, correct[0]) else: return (correct[1], correct[1], correct[0], correct[0]) class AlignVisionEmbeddings(nn.Module): def __init__(self, config: AlignVisionConfig): super().__init__() self.out_dim = round_filters(config, 32) self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1)) self.convolution = nn.Conv2d(config.num_channels, self.out_dim, kernel_size=3, stride=2, padding='valid', bias=False) self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) self.activation = ACT2FN[config.hidden_act] def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: features = self.padding(pixel_values) features = self.convolution(features) features = self.batchnorm(features) features = self.activation(features) return features class AlignVisionDepthwiseConv2d(nn.Conv2d): def __init__(self, in_channels, depth_multiplier=1, kernel_size=3, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'): out_channels = in_channels * depth_multiplier super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias, padding_mode=padding_mode) class AlignVisionExpansionLayer(nn.Module): def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int): super().__init__() self.expand_conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding='same', 
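# --- Editor's note: illustrative sketch, not part of the original source. ---
# `align_loss` above is the usual symmetric (CLIP-style) contrastive objective: each row
# of the text-to-image similarity matrix is scored against its diagonal index with
# cross-entropy (label smoothing 0.1), the same is done column-wise via the transpose,
# and the two directions are averaged. A toy check on a 2x2 similarity matrix:
import torch
from transformers.models.align.modeling_align import align_loss  # the function defined above

similarity = torch.tensor([[5.0, 0.0], [0.0, 5.0]])  # texts x images, matches on the diagonal
print(align_loss(similarity))  # low, since every row and column is dominated by its matching entry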
bias=False) self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps) self.expand_act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: hidden_states = self.expand_conv(hidden_states) hidden_states = self.expand_bn(hidden_states) hidden_states = self.expand_act(hidden_states) return hidden_states class AlignVisionDepthwiseLayer(nn.Module): def __init__(self, config: AlignVisionConfig, in_dim: int, stride: int, kernel_size: int, adjust_padding: bool): super().__init__() self.stride = stride conv_pad = 'valid' if self.stride == 2 else 'same' padding = correct_pad(kernel_size, adjust=adjust_padding) self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding) self.depthwise_conv = AlignVisionDepthwiseConv2d(in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False) self.depthwise_norm = nn.BatchNorm2d(num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) self.depthwise_act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: if self.stride == 2: hidden_states = self.depthwise_conv_pad(hidden_states) hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.depthwise_norm(hidden_states) hidden_states = self.depthwise_act(hidden_states) return hidden_states class AlignVisionSqueezeExciteLayer(nn.Module): def __init__(self, config: AlignVisionConfig, in_dim: int, expand_dim: int, expand: bool=False): super().__init__() self.dim = expand_dim if expand else in_dim self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio)) self.squeeze = nn.AdaptiveAvgPool2d(output_size=1) self.reduce = nn.Conv2d(in_channels=self.dim, out_channels=self.dim_se, kernel_size=1, padding='same') self.expand = nn.Conv2d(in_channels=self.dim_se, out_channels=self.dim, kernel_size=1, padding='same') self.act_reduce = ACT2FN[config.hidden_act] self.act_expand = nn.Sigmoid() def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: inputs = hidden_states hidden_states = self.squeeze(hidden_states) hidden_states = self.reduce(hidden_states) hidden_states = self.act_reduce(hidden_states) hidden_states = self.expand(hidden_states) hidden_states = self.act_expand(hidden_states) hidden_states = torch.mul(inputs, hidden_states) return hidden_states class AlignVisionFinalBlockLayer(nn.Module): def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool): super().__init__() self.apply_dropout = stride == 1 and (not id_skip) self.project_conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding='same', bias=False) self.project_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) self.dropout = nn.Dropout(p=drop_rate) def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor: hidden_states = self.project_conv(hidden_states) hidden_states = self.project_bn(hidden_states) if self.apply_dropout: hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + embeddings return hidden_states class AlignVisionBlock(nn.Module): def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, expand_ratio: int, kernel_size: int, drop_rate: float, id_skip: bool, adjust_padding: bool): super().__init__() self.expand_ratio = expand_ratio self.expand = True if self.expand_ratio != 1 else False expand_in_dim = in_dim * expand_ratio if self.expand: 
self.expansion = AlignVisionExpansionLayer(config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride) self.depthwise_conv = AlignVisionDepthwiseLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, stride=stride, kernel_size=kernel_size, adjust_padding=adjust_padding) self.squeeze_excite = AlignVisionSqueezeExciteLayer(config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand) self.projection = AlignVisionFinalBlockLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, out_dim=out_dim, stride=stride, drop_rate=drop_rate, id_skip=id_skip) def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: embeddings = hidden_states if self.expand_ratio != 1: hidden_states = self.expansion(hidden_states) hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.squeeze_excite(hidden_states) hidden_states = self.projection(embeddings, hidden_states) return hidden_states class AlignVisionEncoder(nn.Module): def __init__(self, config: AlignVisionConfig): super().__init__() self.depth_coefficient = config.depth_coefficient def round_repeats(repeats): return int(math.ceil(self.depth_coefficient * repeats)) num_base_blocks = len(config.in_channels) num_blocks = sum((round_repeats(n) for n in config.num_block_repeats)) curr_block_num = 0 blocks = [] for i in range(num_base_blocks): in_dim = round_filters(config, config.in_channels[i]) out_dim = round_filters(config, config.out_channels[i]) stride = config.strides[i] kernel_size = config.kernel_sizes[i] expand_ratio = config.expand_ratios[i] for j in range(round_repeats(config.num_block_repeats[i])): id_skip = True if j == 0 else False stride = 1 if j > 0 else stride in_dim = out_dim if j > 0 else in_dim adjust_padding = False if curr_block_num in config.depthwise_padding else True drop_rate = config.drop_connect_rate * curr_block_num / num_blocks block = AlignVisionBlock(config=config, in_dim=in_dim, out_dim=out_dim, stride=stride, kernel_size=kernel_size, expand_ratio=expand_ratio, drop_rate=drop_rate, id_skip=id_skip, adjust_padding=adjust_padding) blocks.append(block) curr_block_num += 1 self.blocks = nn.ModuleList(blocks) def forward(self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> BaseModelOutputWithPoolingAndNoAttention: all_hidden_states = (hidden_states,) if output_hidden_states else None for block in self.blocks: hidden_states = block(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states] if v is not None)) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states) class AlignTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), 
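# --- Editor's note: illustrative sketch, not part of the original source. ---
# With the default AlignVisionConfig (width_coefficient=2.0, depth_coefficient=3.1,
# depth_divisor=8, num_block_repeats=[1, 2, 2, 3, 3, 4, 1]), `round_filters` above scales
# channel counts to a multiple of the divisor and `round_repeats` in AlignVisionEncoder
# scales the per-stage block counts, so the encoder ends up stacking 55 blocks:
import math
from transformers import AlignVisionConfig
from transformers.models.align.modeling_align import round_filters  # defined above

cfg = AlignVisionConfig()
print(round_filters(cfg, 32))  # 64: 32 * 2.0 is already a multiple of 8
repeats = [math.ceil(cfg.depth_coefficient * n) for n in cfg.num_block_repeats]
print(repeats, sum(repeats))   # [4, 7, 7, 10, 10, 13, 4] 55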
persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class AlignTextSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = 
self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class AlignTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states ALIGN_TEXT_SELF_ATTENTION_CLASSES = {'eager': AlignTextSelfAttention} class AlignTextAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = 
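# --- Editor's note: illustrative sketch, not part of the original source. ---
# For the 'relative_key' / 'relative_key_query' position embedding types above, the
# pairwise offset distance = l - r lies in [-(max_pos - 1), max_pos - 1]; adding
# max_position_embeddings - 1 shifts it into [0, 2 * max_pos - 2], exactly the index
# range of the (2 * max_pos - 1)-row distance_embedding table. A small numeric check:
import torch

max_pos = 512
l = torch.arange(4).view(-1, 1)            # query positions
r = torch.arange(4).view(1, -1)            # key positions
idx = (l - r) + max_pos - 1                # offsets shifted to non-negative indices
print(idx.min().item(), idx.max().item())  # 508 514, well inside [0, 1022]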
ALIGN_TEXT_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = AlignTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class AlignTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class AlignTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class AlignTextLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = AlignTextAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = AlignTextAttention(config, position_embedding_type='absolute') self.intermediate = AlignTextIntermediate(config) self.output = AlignTextOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: 
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class AlignTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([AlignTextLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
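# --- Editor's note: illustrative sketch, not part of the original source. ---
# `apply_chunking_to_forward` in AlignTextLayer.forward above trades memory for compute:
# when config.chunk_size_feed_forward > 0, the sequence dimension (seq_len_dim=1) is
# split into chunks of that size, the feed-forward is run on each chunk, and the results
# are concatenated, which matches running it on the full sequence in one go:
import torch
from transformers.pytorch_utils import apply_chunking_to_forward

ffn = torch.nn.Linear(8, 8)
hidden = torch.randn(2, 6, 8)                           # (batch, seq_len, hidden)
chunked = apply_chunking_to_forward(ffn, 2, 1, hidden)  # chunks of 2 along dim 1
assert torch.allclose(chunked, ffn(hidden), atol=1e-6)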
Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class AlignTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class AlignPreTrainedModel(PreTrainedModel): config_class = AlignConfig base_model_prefix = 'align' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, AlignModel): nn.init.xavier_uniform_(module.text_projection.weight) module.text_projection.bias.data.zero_() module.text_projection._is_hf_initialized = True elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @add_start_docstrings('The text model from ALIGN without any head or projection on top.', ALIGN_START_DOCSTRING) class AlignTextModel(AlignPreTrainedModel): config_class = AlignTextConfig _no_split_modules = ['AlignTextEmbeddings'] def __init__(self, config: AlignTextConfig, add_pooling_layer: bool=True): super().__init__(config) self.config = config self.embeddings = AlignTextEmbeddings(config) self.encoder = AlignTextEncoder(config) self.pooler = AlignTextPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING) 
@replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=AlignTextConfig) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('The vision model from ALIGN without any head or projection on top.', ALIGN_START_DOCSTRING) class AlignVisionModel(AlignPreTrainedModel): config_class = AlignVisionConfig main_input_name = 'pixel_values' supports_gradient_checkpointing = False def __init__(self, config: AlignVisionConfig): super().__init__(config) self.config = config self.embeddings = AlignVisionEmbeddings(config) self.encoder = AlignVisionEncoder(config) if config.pooling_type == 'mean': self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True) elif config.pooling_type == 'max': self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True) else: raise ValueError(f"config.pooling 
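# --- Editor's note: illustrative sketch, not part of the original source; it assumes the
# public 'kakaobrain/align-base' checkpoint referenced by _CHECKPOINT_FOR_DOC above is
# available. The text tower defined above can be used on its own, returning per-token
# hidden states plus the tanh-pooled [CLS] representation:
import torch
from transformers import AlignTextModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('kakaobrain/align-base')
text_model = AlignTextModel.from_pretrained('kakaobrain/align-base')
inputs = tokenizer(['a photo of a cat'], padding=True, return_tensors='pt')
with torch.no_grad():
    outputs = text_model(**inputs)
print(outputs.last_hidden_state.shape, outputs.pooler_output.shape)  # (1, seq_len, 768) and (1, 768)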
must be one of ['mean', 'max'] got {config.pooling}") self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.convolution @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=AlignVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) pooled_output = pooled_output.reshape(pooled_output.shape[:2]) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states) @add_start_docstrings(ALIGN_START_DOCSTRING) class AlignModel(AlignPreTrainedModel): config_class = AlignConfig def __init__(self, config: AlignConfig): super().__init__(config) if not isinstance(config.text_config, AlignTextConfig): raise TypeError(f'config.text_config is expected to be of type AlignTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.vision_config, AlignVisionConfig): raise TypeError(f'config.vision_config is expected to be of type AlignVisionConfig but is of type {type(config.vision_config)}.') text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.text_model = AlignTextModel(text_config) self.vision_model = AlignVisionModel(vision_config) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim) self.temperature = nn.Parameter(torch.tensor(self.config.temperature_init_value)) self.post_init() @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING) def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = text_outputs[0][:, 0, :] text_features = 
self.text_projection(last_hidden_state) return text_features @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING) def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) image_features = vision_outputs[1] return image_features @add_start_docstrings_to_model_forward(ALIGN_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=AlignOutput, config_class=AlignConfig) def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, AlignOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) image_embeds = vision_outputs[1] text_embeds = text_outputs[0][:, 0, :] text_embeds = self.text_projection(text_embeds) image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) logits_per_text = torch.matmul(text_embeds, image_embeds.t()) / self.temperature logits_per_image = logits_per_text.t() loss = None if return_loss: loss = align_loss(logits_per_text) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return (loss,) + output if loss is not None else output return AlignOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) __all__ = ['AlignPreTrainedModel', 'AlignTextModel', 'AlignVisionModel', 'AlignModel'] # File: transformers-main/src/transformers/models/align/processing_align.py """""" from typing import List, Union try: from typing import Unpack except ImportError: from typing_extensions import Unpack from ...image_utils import ImageInput from ...processing_utils import ProcessingKwargs, ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput class AlignProcessorKwargs(ProcessingKwargs, total=False): _defaults = {'text_kwargs': {'padding': 'max_length', 'max_length': 64}} class 
AlignProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] image_processor_class = 'EfficientNetImageProcessor' tokenizer_class = ('BertTokenizer', 'BertTokenizerFast') def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, images: ImageInput=None, audio=None, videos=None, **kwargs: Unpack[AlignProcessorKwargs]) -> BatchEncoding: if text is None and images is None: raise ValueError('You must specify either text or images.') output_kwargs = self._merge_kwargs(AlignProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) if text is not None: encoding = self.tokenizer(text, **output_kwargs['text_kwargs']) if images is not None: image_features = self.image_processor(images, **output_kwargs['images_kwargs']) if 'return_tensors' in output_kwargs['common_kwargs']: return_tensors = output_kwargs['common_kwargs'].pop('return_tensors', None) if text is not None and images is not None: encoding['pixel_values'] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) __all__ = ['AlignProcessor'] # File: transformers-main/src/transformers/models/altclip/__init__.py from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_altclip import * from .modeling_altclip import * from .processing_altclip import * else: import sys _file = globals()['__file__'] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) # File: transformers-main/src/transformers/models/altclip/configuration_altclip.py """""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class AltCLIPTextConfig(PretrainedConfig): model_type = 'altclip_text_model' def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, project_dim=768, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range 
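# Illustrative usage sketch: a minimal example of how the AlignProcessor and AlignModel defined
# above are combined for zero-shot image-text matching. This is a sketch, not code from the
# original sources; the checkpoint name 'kakaobrain/align-base' and the image URL are assumptions
# used only for illustration, while the processor/model calls follow the APIs shown above.
#
#   import requests
#   from PIL import Image
#   from transformers import AlignModel, AlignProcessor
#
#   processor = AlignProcessor.from_pretrained('kakaobrain/align-base')  # assumed checkpoint name
#   model = AlignModel.from_pretrained('kakaobrain/align-base')          # assumed checkpoint name
#
#   image = Image.open(requests.get('https://example.com/cat.png', stream=True).raw)  # placeholder URL
#   candidates = ['a photo of a cat', 'a photo of a dog']
#   inputs = processor(text=candidates, images=image, return_tensors='pt')  # text padded to max_length=64 by default
#
#   outputs = model(**inputs)
#   probs = outputs.logits_per_image.softmax(dim=-1)  # per-image probabilities over the candidate texts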
self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.project_dim = project_dim class AltCLIPVisionConfig(PretrainedConfig): model_type = 'altclip_vision_model' def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'altclip': config_dict = config_dict['vision_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class AltCLIPConfig(PretrainedConfig): model_type = 'altclip' def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs): text_config_dict = kwargs.pop('text_config_dict', None) vision_config_dict = kwargs.pop('vision_config_dict', None) super().__init__(**kwargs) if text_config_dict is not None: if text_config is None: text_config = {} _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict() for (key, value) in _text_config_dict.items(): if key in text_config and value != text_config[key] and (key not in ['transformers_version']): if key in text_config_dict: message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["{key}"]` will be used instead.' else: message = f'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The value `text_config["{key}"]` will be overridden.' logger.info(message) text_config.update(_text_config_dict) if vision_config_dict is not None: if vision_config is None: vision_config = {} _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict() if 'id2label' in _vision_config_dict: _vision_config_dict['id2label'] = {str(key): value for (key, value) in _vision_config_dict['id2label'].items()} for (key, value) in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and (key not in ['transformers_version']): if key in vision_config_dict: message = f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different values. The value `vision_config_dict["{key}"]` will be used instead.' 
else: message = f'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.' logger.info(message) vision_config.update(_vision_config_dict) if text_config is None: text_config = {} logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.') if vision_config is None: vision_config = {} logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.') self.text_config = AltCLIPTextConfig(**text_config) self.vision_config = AltCLIPVisionConfig(**vision_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = 1.0 @classmethod def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs): return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) __all__ = ['AltCLIPTextConfig', 'AltCLIPVisionConfig', 'AltCLIPConfig'] # File: transformers-main/src/transformers/models/altclip/modeling_altclip.py """""" import math from dataclasses import dataclass from typing import Any, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, BaseModelOutputWithPoolingAndProjection from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'BAAI/AltCLIP' _CONFIG_FOR_DOC = 'AltCLIPConfig' ALTCLIP_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' ALTCLIP_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' ALTCLIP_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' ALTCLIP_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass class AltCLIPOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) class AltRobertaEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, 
device=inputs_embeds.device) return position_ids.unsqueeze(0).expand(input_shape) class AltRobertaSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance 
= position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class AltRobertaSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states ALT_ROBERTA_SELF_ATTENTION_CLASSES = {'eager': AltRobertaSelfAttention} class AltRobertaAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = ALT_ROBERTA_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = AltRobertaSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, 
head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class AltRobertaIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class AltRobertaOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class AltRobertaLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = AltRobertaAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = AltRobertaAttention(config, position_embedding_type='absolute') self.intermediate = AltRobertaIntermediate(config) self.output = AltRobertaOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, 
self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class AltRobertaEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([AltRobertaLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class AltRobertaPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class AltCLIPAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = 
config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: (bsz, tgt_len, embed_dim) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped) class AltCLIPMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, 
config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class AltCLIPEncoderLayer(nn.Module): def __init__(self, config: AltCLIPConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = AltCLIPAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = AltCLIPMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class AltCLIPEncoder(nn.Module): def __init__(self, config: AltCLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([AltCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class AltCLIPVisionEmbeddings(nn.Module): def __init__(self, config: AltCLIPVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) 
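# Illustrative shape sketch: AltCLIPVisionEmbeddings turns an image into a token sequence. The
# strided Conv2d patch embedding below cuts the image into non-overlapping patches, and the learned
# class embedding above is prepended, so the sequence length is (image_size // patch_size) ** 2 + 1.
# The concrete numbers are a sketch assuming the AltCLIPVisionConfig defaults
# (image_size=224, patch_size=32, hidden_size=768), not measurements from the original repository:
#
#   num_patches = (224 // 32) ** 2   # 7 * 7 = 49 patch tokens
#   seq_len = num_patches + 1        # + 1 class token -> 50 positions
#   # pixel_values of shape (batch, 3, 224, 224) therefore yield embeddings of shape (batch, 50, 768)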
self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class AltCLIPPreTrainedModel(PreTrainedModel): config_class = AltCLIPConfig base_model_prefix = 'altclip' supports_gradient_checkpointing = True _no_split_module = [] def _init_weights(self, module): factor = self.config.initializer_factor if isinstance(module, AltCLIPVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim ** (-0.5) * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, AltCLIPAttention): factor = self.config.initializer_factor in_proj_std = module.embed_dim ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor out_proj_std = module.embed_dim ** (-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, AltCLIPMLP): factor = self.config.initializer_factor in_proj_std = module.config.hidden_size ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor fc_std = (2 * module.config.hidden_size) ** (-0.5) * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, AltCLIPModel): nn.init.normal_(module.text_projection.weight, std=module.text_embed_dim ** (-0.5) * self.config.initializer_factor) module.text_projection._is_hf_initialized = True nn.init.normal_(module.visual_projection.weight, std=module.vision_embed_dim ** (-0.5) * self.config.initializer_factor) module.visual_projection._is_hf_initialized = True elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() class AltCLIPVisionTransformer(nn.Module): def __init__(self, config: AltCLIPVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = AltCLIPVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = AltCLIPEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, 
eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=AltCLIPVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class AltCLIPVisionModel(AltCLIPPreTrainedModel): config_class = AltCLIPVisionConfig main_input_name = 'pixel_values' def __init__(self, config: AltCLIPVisionConfig): super().__init__(config) self.vision_model = AltCLIPVisionTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=AltCLIPVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class AltRobertaModel(AltCLIPPreTrainedModel): config_class = AltCLIPTextConfig def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = AltRobertaEmbeddings(config) self.encoder = AltRobertaEncoder(config) self.pooler = AltRobertaPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: 
Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) class 
AltCLIPTextModel(AltCLIPPreTrainedModel): config_class = AltCLIPTextConfig def __init__(self, config): super().__init__(config) self.roberta = AltRobertaModel(config, add_pooling_layer=False) self.transformation = nn.Linear(config.hidden_size, config.project_dim) self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.roberta.embeddings.word_embeddings def set_input_embeddings(self, value: nn.Embedding) -> None: self.roberta.embeddings.word_embeddings = value def resize_token_embeddings(self, new_num_tokens: Optional[int]=None) -> nn.Embedding: return super().resize_token_embeddings(new_num_tokens) @add_start_docstrings_to_model_forward(ALTCLIP_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndProjection, config_class=AltCLIPTextConfig) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPoolingAndProjection]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.pre_LN(sequence_output) projection_state = self.transformation(sequence_output) pooler_output = projection_state[:, 0] if not return_dict: return (projection_state, pooler_output) + outputs[2:4] return BaseModelOutputWithPoolingAndProjection(last_hidden_state=projection_state, pooler_output=pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class AltCLIPModel(AltCLIPPreTrainedModel): config_class = AltCLIPConfig def __init__(self, config: AltCLIPConfig): super().__init__(config) if not isinstance(config.vision_config, AltCLIPVisionConfig): raise TypeError(f'config.vision_config is expected to be of type AltCLIPVisionConfig but is of type {type(config.vision_config)}.') if not isinstance(config.text_config, AltCLIPTextConfig): raise TypeError(f'config.text_config is expected to be of type AltCLIPTextConfig but is of type {type(config.text_config)}.') text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.project_dim self.vision_embed_dim = vision_config.hidden_size self.text_model = AltCLIPTextModel(text_config) self.vision_model = AltCLIPVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.post_init() @add_start_docstrings_to_model_forward(ALTCLIP_TEXT_INPUTS_DOCSTRING) def get_text_features(self, input_ids: 
Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, token_type_ids=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING) def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = vision_outputs[1] image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(ALTCLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=AltCLIPOutput, config_class=AltCLIPConfig) def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, AltCLIPOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image 
= logits_per_text.T loss = None if return_loss: loss = clip_loss(logits_per_text) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return (loss,) + output if loss is not None else output return AltCLIPOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx __all__ = ['AltCLIPPreTrainedModel', 'AltCLIPVisionModel', 'AltCLIPTextModel', 'AltCLIPModel'] # File: transformers-main/src/transformers/models/altclip/processing_altclip.py """""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class AltCLIPProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] image_processor_class = 'CLIPImageProcessor' tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if 'feature_extractor' in kwargs: warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning) feature_extractor = kwargs.pop('feature_extractor') image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(image_processor, tokenizer) def __call__(self, text=None, images=None, return_tensors=None, **kwargs): if text is None and images is None: raise ValueError('You have to specify either text or images. 
Both cannot be none.') if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) if text is not None and images is not None: encoding['pixel_values'] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) __all__ = ['AltCLIPProcessor'] # File: transformers-main/src/transformers/models/audio_spectrogram_transformer/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_audio_spectrogram_transformer': ['ASTConfig'], 'feature_extraction_audio_spectrogram_transformer': ['ASTFeatureExtractor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_audio_spectrogram_transformer'] = ['ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ASTConfig from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ASTForAudioClassification, ASTModel, ASTPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py """""" from typing import Any, Dict from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class ASTConfig(PretrainedConfig): model_type = 'audio-spectrogram-transformer' def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.patch_size = patch_size self.qkv_bias = qkv_bias self.frequency_stride = frequency_stride self.time_stride = time_stride self.max_length = max_length self.num_mel_bins = num_mel_bins def _get_non_default_generation_parameters(self) -> Dict[str, Any]: return {} # File: 
transformers-main/src/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py """""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_audio_spectrogram_transformer_config(model_name): config = ASTConfig() if '10-10' in model_name: pass elif 'speech-commands' in model_name: config.max_length = 128 elif '12-12' in model_name: config.time_stride = 12 config.frequency_stride = 12 elif '14-14' in model_name: config.time_stride = 14 config.frequency_stride = 14 elif '16-16' in model_name: config.time_stride = 16 config.frequency_stride = 16 else: raise ValueError('Model not supported') repo_id = 'huggingface/label-files' if 'speech-commands' in model_name: config.num_labels = 35 filename = 'speech-commands-v2-id2label.json' else: config.num_labels = 527 filename = 'audioset-id2label.json' id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} config.id2label = id2label config.label2id = {v: k for (k, v) in id2label.items()} return config def rename_key(name): if 'module.v' in name: name = name.replace('module.v', 'audio_spectrogram_transformer') if 'cls_token' in name: name = name.replace('cls_token', 'embeddings.cls_token') if 'dist_token' in name: name = name.replace('dist_token', 'embeddings.distillation_token') if 'pos_embed' in name: name = name.replace('pos_embed', 'embeddings.position_embeddings') if 'patch_embed.proj' in name: name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection') if 'blocks' in name: name = name.replace('blocks', 'encoder.layer') if 'attn.proj' in name: name = name.replace('attn.proj', 'attention.output.dense') if 'attn' in name: name = name.replace('attn', 'attention.self') if 'norm1' in name: name = name.replace('norm1', 'layernorm_before') if 'norm2' in name: name = name.replace('norm2', 'layernorm_after') if 'mlp.fc1' in name: name = name.replace('mlp.fc1', 'intermediate.dense') if 'mlp.fc2' in name: name = name.replace('mlp.fc2', 'output.dense') if 'audio_spectrogram_transformer.norm' in name: name = name.replace('audio_spectrogram_transformer.norm', 'audio_spectrogram_transformer.layernorm') if 'module.mlp_head.0' in name: name = name.replace('module.mlp_head.0', 'classifier.layernorm') if 'module.mlp_head.1' in name: name = name.replace('module.mlp_head.1', 'classifier.dense') return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if 'qkv' in key: key_split = key.split('.') layer_num = int(key_split[3]) dim = config.hidden_size if 'weight' in key: orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :] orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[dim:dim * 2, :] orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :] else: orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim] 
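# Editor's note (descriptive comment, not part of the original script): the original AST
# checkpoint stores query, key and value as a single fused `qkv` tensor stacked along dim 0,
# so a fused weight has shape (3 * hidden_size, hidden_size) and a fused bias has shape
# (3 * hidden_size,). The slices around this point simply undo that stacking:
#   [:dim]        -> query
#   [dim:2 * dim] -> key
#   [-dim:]       -> value (i.e. the last hidden_size rows/elements)
# For example, with a hypothetical dim of 2, a fused bias [q0, q1, k0, k1, v0, v1] splits into
# [q0, q1], [k0, k1] and [v0, v1].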
orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim:dim * 2] orig_state_dict[f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:] else: orig_state_dict[rename_key(key)] = val return orig_state_dict def remove_keys(state_dict): ignore_keys = ['module.v.head.weight', 'module.v.head.bias', 'module.v.head_dist.weight', 'module.v.head_dist.bias'] for k in ignore_keys: state_dict.pop(k, None) @torch.no_grad() def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): config = get_audio_spectrogram_transformer_config(model_name) model_name_to_url = {'ast-finetuned-audioset-10-10-0.4593': 'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1', 'ast-finetuned-audioset-10-10-0.450': 'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1', 'ast-finetuned-audioset-10-10-0.448': 'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1', 'ast-finetuned-audioset-10-10-0.448-v2': 'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1', 'ast-finetuned-audioset-12-12-0.447': 'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1', 'ast-finetuned-audioset-14-14-0.443': 'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1', 'ast-finetuned-audioset-16-16-0.442': 'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1', 'ast-finetuned-speech-commands-v2': 'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'} checkpoint_url = model_name_to_url[model_name] state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu') remove_keys(state_dict) new_state_dict = convert_state_dict(state_dict, config) model = ASTForAudioClassification(config) model.eval() model.load_state_dict(new_state_dict) mean = -4.2677393 if 'speech-commands' not in model_name else -6.845978 std = 4.5689974 if 'speech-commands' not in model_name else 5.5654526 max_length = 1024 if 'speech-commands' not in model_name else 128 feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length) if 'speech-commands' in model_name: dataset = load_dataset('google/speech_commands', 'v0.02', split='validation', trust_remote_code=True) waveform = dataset[0]['audio']['array'] else: filepath = hf_hub_download(repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset') (waveform, _) = torchaudio.load(filepath) waveform = waveform.squeeze().numpy() inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors='pt') outputs = model(**inputs) logits = outputs.logits if model_name == 'ast-finetuned-audioset-10-10-0.4593': expected_slice = torch.tensor([-0.876, -7.0042, -8.6602]) elif model_name == 'ast-finetuned-audioset-10-10-0.450': expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718]) elif model_name == 'ast-finetuned-audioset-10-10-0.448': expected_slice = torch.tensor([-2.6128, -8.008, -9.4344]) elif model_name == 'ast-finetuned-audioset-10-10-0.448-v2': expected_slice = torch.tensor([-1.508, -7.4534, -8.8917]) elif model_name == 'ast-finetuned-audioset-12-12-0.447': expected_slice = torch.tensor([-0.505, -6.5833, -8.0843]) elif model_name == 'ast-finetuned-audioset-14-14-0.443': expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413]) elif model_name == 'ast-finetuned-audioset-16-16-0.442': expected_slice = torch.tensor([-1.2113, 
-6.9101, -8.347]) elif model_name == 'ast-finetuned-speech-commands-v2': expected_slice = torch.tensor([6.1589, -8.0566, -8.7984]) else: raise ValueError('Unknown model name') if not torch.allclose(logits[0, :3], expected_slice, atol=0.0001): raise ValueError("Logits don't match") print('Looks ok!') if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model {model_name} to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) print(f'Saving feature extractor to {pytorch_dump_folder_path}') feature_extractor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print('Pushing model and feature extractor to the hub...') model.push_to_hub(f'MIT/{model_name}') feature_extractor.push_to_hub(f'MIT/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help="Name of the Audio Spectrogram Transformer model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.') args = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py """""" from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, is_speech_available, is_torch_available, logging if is_speech_available(): import torchaudio.compliance.kaldi as ta_kaldi if is_torch_available(): import torch logger = logging.get_logger(__name__) class ASTFeatureExtractor(SequenceFeatureExtractor): model_input_names = ['input_values', 'attention_mask'] def __init__(self, feature_size=1, sampling_rate=16000, num_mel_bins=128, max_length=1024, padding_value=0.0, do_normalize=True, mean=-4.2677393, std=4.5689974, return_attention_mask=False, **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.num_mel_bins = num_mel_bins self.max_length = max_length self.do_normalize = do_normalize self.mean = mean self.std = std self.return_attention_mask = return_attention_mask if not is_speech_available(): mel_filters = mel_filter_bank(num_frequency_bins=256, num_mel_filters=self.num_mel_bins, min_frequency=20, max_frequency=sampling_rate // 2, sampling_rate=sampling_rate, norm=None, mel_scale='kaldi', triangularize_in_mel_space=True) self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0))) self.window = window_function(400, 'hann', periodic=False) def _extract_fbank_features(self, waveform: np.ndarray, max_length: int) -> np.ndarray: if is_speech_available(): waveform = torch.from_numpy(waveform).unsqueeze(0) fbank = ta_kaldi.fbank(waveform, sample_frequency=self.sampling_rate, window_type='hanning', num_mel_bins=self.num_mel_bins) else: waveform = np.squeeze(waveform) fbank = spectrogram(waveform, self.window, frame_length=400, hop_length=160, fft_length=512, power=2.0, center=False, preemphasis=0.97, mel_filters=self.mel_filters, log_mel='log', 
mel_floor=1.192092955078125e-07, remove_dc_offset=True).T fbank = torch.from_numpy(fbank) n_frames = fbank.shape[0] difference = max_length - n_frames if difference > 0: pad_module = torch.nn.ZeroPad2d((0, 0, 0, difference)) fbank = pad_module(fbank) elif difference < 0: fbank = fbank[0:max_length, :] fbank = fbank.numpy() return fbank def normalize(self, input_values: np.ndarray) -> np.ndarray: return (input_values - self.mean) / (self.std * 2) def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], sampling_rate: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning('It is strongly recommended to pass the `sampling_rate` argument to this function. Failing to do so can result in silent errors that might be hard to debug.') is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}') is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list))) if is_batched: raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech] elif not is_batched and (not isinstance(raw_speech, np.ndarray)): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) if not is_batched: raw_speech = [raw_speech] features = [self._extract_fbank_features(waveform, max_length=self.max_length) for waveform in raw_speech] padded_inputs = BatchFeature({'input_values': features}) input_values = padded_inputs.get('input_values') if isinstance(input_values[0], list): padded_inputs['input_values'] = [np.asarray(feature, dtype=np.float32) for feature in input_values] if self.do_normalize: padded_inputs['input_values'] = [self.normalize(feature) for feature in input_values] if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs # File: transformers-main/src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py """""" import math from typing import Dict, List, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_audio_spectrogram_transformer import ASTConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'ASTConfig' _CHECKPOINT_FOR_DOC = 'MIT/ast-finetuned-audioset-10-10-0.4593' _EXPECTED_OUTPUT_SHAPE = [1, 1214, 768] _SEQ_CLASS_CHECKPOINT = 'MIT/ast-finetuned-audioset-10-10-0.4593' _SEQ_CLASS_EXPECTED_OUTPUT = "'Speech'" 
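# Editor's sketch (not part of the original module; the helper name below is hypothetical).
# It only illustrates where the expected hidden-state shape [1, 1214, 768] above comes from:
# ASTEmbeddings.get_shape() turns the padded spectrogram into a 12 x 101 patch grid under the
# default ASTConfig (num_mel_bins=128, max_length=1024, patch_size=16, frequency/time stride 10),
# and the cls + distillation tokens add two more positions.
def _editor_demo_ast_sequence_length(
    num_mel_bins: int = 128,
    max_length: int = 1024,
    patch_size: int = 16,
    frequency_stride: int = 10,
    time_stride: int = 10,
) -> int:
    # Same arithmetic as ASTEmbeddings.get_shape below, plus the two special tokens.
    frequency_out_dimension = (num_mel_bins - patch_size) // frequency_stride + 1  # 12 with the defaults
    time_out_dimension = (max_length - patch_size) // time_stride + 1  # 101 with the defaults
    return frequency_out_dimension * time_out_dimension + 2  # 12 * 101 + 2 = 1214

# Usage (illustrative): _editor_demo_ast_sequence_length() == 1214, matching _EXPECTED_OUTPUT_SHAPE.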
_SEQ_CLASS_EXPECTED_LOSS = 0.17 class ASTEmbeddings(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.patch_embeddings = ASTPatchEmbeddings(config) (frequency_out_dimension, time_out_dimension) = self.get_shape(config) num_patches = frequency_out_dimension * time_out_dimension self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 2, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def get_shape(self, config): frequency_out_dimension = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1 time_out_dimension = (config.max_length - config.patch_size) // config.time_stride + 1 return (frequency_out_dimension, time_out_dimension) def forward(self, input_values: torch.Tensor) -> torch.Tensor: batch_size = input_values.shape[0] embeddings = self.patch_embeddings(input_values) cls_tokens = self.cls_token.expand(batch_size, -1, -1) distillation_tokens = self.distillation_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, distillation_tokens, embeddings), dim=1) embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings class ASTPatchEmbeddings(nn.Module): def __init__(self, config): super().__init__() patch_size = config.patch_size frequency_stride = config.frequency_stride time_stride = config.time_stride self.projection = nn.Conv2d(1, config.hidden_size, kernel_size=(patch_size, patch_size), stride=(frequency_stride, time_stride)) def forward(self, input_values: torch.Tensor) -> torch.Tensor: input_values = input_values.unsqueeze(1) input_values = input_values.transpose(2, 3) embeddings = self.projection(input_values).flatten(2).transpose(1, 2) return embeddings class ASTSelfAttention(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size {(config.hidden_size,)} is not a multiple of the number of attention heads {config.num_attention_heads}.') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_probs = 
nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class ASTSdpaSelfAttention(ASTSelfAttention): def __init__(self, config: ASTConfig) -> None: super().__init__(config) self.attention_probs_dropout_prob = config.attention_probs_dropout_prob def forward(self, hidden_states, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, head_mask, self.attention_probs_dropout_prob if self.training else 0.0, is_causal=False, scale=None) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) return (context_layer, None) class ASTSelfOutput(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class ASTAttention(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() self.attention = ASTSelfAttention(config) self.output = ASTSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class ASTSdpaAttention(ASTAttention): def __init__(self, config: ASTConfig) -> None: super().__init__(config) self.attention = ASTSdpaSelfAttention(config) class ASTIntermediate(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() 
self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class ASTOutput(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states AST_ATTENTION_CLASSES = {'eager': ASTAttention, 'sdpa': ASTSdpaAttention} class ASTLayer(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = AST_ATTENTION_CLASSES[config._attn_implementation](config) self.intermediate = ASTIntermediate(config) self.output = ASTOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention(self.layernorm_before(hidden_states), head_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] hidden_states = attention_output + hidden_states layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs class ASTEncoder(nn.Module): def __init__(self, config: ASTConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([ASTLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, layer_head_mask, output_attentions) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class 
ASTPreTrainedModel(PreTrainedModel): config_class = ASTConfig base_model_prefix = 'audio_spectrogram_transformer' main_input_name = 'input_values' supports_gradient_checkpointing = True _supports_sdpa = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ASTConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING = '\n Args:\n input_values (`torch.FloatTensor` of shape `(batch_size, max_length, num_mel_bins)`):\n Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by\n loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via\n the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the\n [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a\n tensor of type `torch.FloatTensor`. See [`~ASTFeatureExtractor.__call__`]\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare AST Model transformer outputting raw hidden-states without any specific head on top.', AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING) class ASTModel(ASTPreTrainedModel): def __init__(self, config: ASTConfig) -> None: super().__init__(config) self.config = config self.embeddings = ASTEmbeddings(config) self.encoder = ASTEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_init() def get_input_embeddings(self) -> ASTPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='audio', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, input_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_values is None: raise ValueError('You have to specify input_values') head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_values) encoder_outputs = self.encoder(embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = (sequence_output[:, 0] + sequence_output[:, 1]) / 2 if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class ASTMLPHead(nn.Module): def __init__(self, config: ASTConfig): super().__init__() self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() def forward(self, hidden_state): hidden_state = self.layernorm(hidden_state) hidden_state = self.dense(hidden_state) return hidden_state @add_start_docstrings('\n Audio Spectrogram Transformer model with an audio classification head on top (a linear layer on top of the pooled\n output) e.g. 
for datasets like AudioSet, Speech Commands v2.\n ', AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING) class ASTForAudioClassification(ASTPreTrainedModel): def __init__(self, config: ASTConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.audio_spectrogram_transformer = ASTModel(config) self.classifier = ASTMLPHead(config) self.post_init() @add_start_docstrings_to_model_forward(AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_SEQ_CLASS_CHECKPOINT, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality='audio', expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS) def forward(self, input_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.audio_spectrogram_transformer(input_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/auto/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available _import_structure = {'auto_factory': ['get_values'], 'configuration_auto': ['CONFIG_MAPPING', 'MODEL_NAMES_MAPPING', 'AutoConfig'], 'feature_extraction_auto': ['FEATURE_EXTRACTOR_MAPPING', 'AutoFeatureExtractor'], 'image_processing_auto': ['IMAGE_PROCESSOR_MAPPING', 'AutoImageProcessor'], 'processing_auto': ['PROCESSOR_MAPPING', 'AutoProcessor'], 'tokenization_auto': ['TOKENIZER_MAPPING', 'AutoTokenizer']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_auto'] = ['MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING', 'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING', 'MODEL_FOR_BACKBONE_MAPPING', 'MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING', 'MODEL_FOR_CAUSAL_LM_MAPPING', 'MODEL_FOR_CTC_MAPPING', 'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING', 
'MODEL_FOR_IMAGE_MAPPING', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING', 'MODEL_FOR_IMAGE_TO_IMAGE_MAPPING', 'MODEL_FOR_KEYPOINT_DETECTION_MAPPING', 'MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING', 'MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING', 'MODEL_FOR_MASKED_LM_MAPPING', 'MODEL_FOR_MASK_GENERATION_MAPPING', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING', 'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING', 'MODEL_FOR_OBJECT_DETECTION_MAPPING', 'MODEL_FOR_PRETRAINING_MAPPING', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING', 'MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING', 'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING', 'MODEL_FOR_TEXT_ENCODING_MAPPING', 'MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING', 'MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING', 'MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING', 'MODEL_FOR_VISION_2_SEQ_MAPPING', 'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING', 'MODEL_MAPPING', 'MODEL_WITH_LM_HEAD_MAPPING', 'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING', 'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING', 'MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING', 'MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING', 'AutoModel', 'AutoBackbone', 'AutoModelForAudioClassification', 'AutoModelForAudioFrameClassification', 'AutoModelForAudioXVector', 'AutoModelForCausalLM', 'AutoModelForCTC', 'AutoModelForDepthEstimation', 'AutoModelForImageClassification', 'AutoModelForImageSegmentation', 'AutoModelForImageToImage', 'AutoModelForInstanceSegmentation', 'AutoModelForKeypointDetection', 'AutoModelForMaskGeneration', 'AutoModelForTextEncoding', 'AutoModelForMaskedImageModeling', 'AutoModelForMaskedLM', 'AutoModelForMultipleChoice', 'AutoModelForNextSentencePrediction', 'AutoModelForObjectDetection', 'AutoModelForPreTraining', 'AutoModelForQuestionAnswering', 'AutoModelForSemanticSegmentation', 'AutoModelForSeq2SeqLM', 'AutoModelForSequenceClassification', 'AutoModelForSpeechSeq2Seq', 'AutoModelForTableQuestionAnswering', 'AutoModelForTextToSpectrogram', 'AutoModelForTextToWaveform', 'AutoModelForTokenClassification', 'AutoModelForUniversalSegmentation', 'AutoModelForVideoClassification', 'AutoModelForVision2Seq', 'AutoModelForVisualQuestionAnswering', 'AutoModelForDocumentQuestionAnswering', 'AutoModelWithLMHead', 'AutoModelForZeroShotImageClassification', 'AutoModelForZeroShotObjectDetection'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_auto'] = ['TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING', 'TF_MODEL_FOR_CAUSAL_LM_MAPPING', 'TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING', 'TF_MODEL_FOR_MASK_GENERATION_MAPPING', 'TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING', 'TF_MODEL_FOR_MASKED_LM_MAPPING', 'TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING', 'TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING', 'TF_MODEL_FOR_PRETRAINING_MAPPING', 'TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING', 'TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING', 'TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING', 'TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING', 'TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING', 'TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING', 'TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING', 'TF_MODEL_FOR_TEXT_ENCODING_MAPPING', 'TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING', 'TF_MODEL_FOR_VISION_2_SEQ_MAPPING', 'TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING', 'TF_MODEL_MAPPING', 'TF_MODEL_WITH_LM_HEAD_MAPPING', 
'TFAutoModel', 'TFAutoModelForAudioClassification', 'TFAutoModelForCausalLM', 'TFAutoModelForImageClassification', 'TFAutoModelForMaskedImageModeling', 'TFAutoModelForMaskedLM', 'TFAutoModelForMaskGeneration', 'TFAutoModelForMultipleChoice', 'TFAutoModelForNextSentencePrediction', 'TFAutoModelForPreTraining', 'TFAutoModelForDocumentQuestionAnswering', 'TFAutoModelForQuestionAnswering', 'TFAutoModelForSemanticSegmentation', 'TFAutoModelForSeq2SeqLM', 'TFAutoModelForSequenceClassification', 'TFAutoModelForSpeechSeq2Seq', 'TFAutoModelForTableQuestionAnswering', 'TFAutoModelForTextEncoding', 'TFAutoModelForTokenClassification', 'TFAutoModelForVision2Seq', 'TFAutoModelForZeroShotImageClassification', 'TFAutoModelWithLMHead'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_auto'] = ['FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING', 'FLAX_MODEL_FOR_CAUSAL_LM_MAPPING', 'FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING', 'FLAX_MODEL_FOR_MASKED_LM_MAPPING', 'FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING', 'FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING', 'FLAX_MODEL_FOR_PRETRAINING_MAPPING', 'FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING', 'FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING', 'FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING', 'FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING', 'FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING', 'FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING', 'FLAX_MODEL_MAPPING', 'FlaxAutoModel', 'FlaxAutoModelForCausalLM', 'FlaxAutoModelForImageClassification', 'FlaxAutoModelForMaskedLM', 'FlaxAutoModelForMultipleChoice', 'FlaxAutoModelForNextSentencePrediction', 'FlaxAutoModelForPreTraining', 'FlaxAutoModelForQuestionAnswering', 'FlaxAutoModelForSeq2SeqLM', 'FlaxAutoModelForSequenceClassification', 'FlaxAutoModelForSpeechSeq2Seq', 'FlaxAutoModelForTokenClassification', 'FlaxAutoModelForVision2Seq'] if TYPE_CHECKING: from .auto_factory import get_values from .configuration_auto import CONFIG_MAPPING, MODEL_NAMES_MAPPING, AutoConfig from .feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor from .image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor from .processing_auto import PROCESSOR_MAPPING, AutoProcessor from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_auto import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING, MODEL_FOR_AUDIO_XVECTOR_MAPPING, MODEL_FOR_BACKBONE_MAPPING, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_CTC_MAPPING, MODEL_FOR_DEPTH_ESTIMATION_MAPPING, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_IMAGE_MAPPING, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, MODEL_FOR_KEYPOINT_DETECTION_MAPPING, MODEL_FOR_MASK_GENERATION_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, MODEL_FOR_TEXT_ENCODING_MAPPING, 
MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoBackbone, AutoModel, AutoModelForAudioClassification, AutoModelForAudioFrameClassification, AutoModelForAudioXVector, AutoModelForCausalLM, AutoModelForCTC, AutoModelForDepthEstimation, AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForImageToImage, AutoModelForInstanceSegmentation, AutoModelForKeypointDetection, AutoModelForMaskedImageModeling, AutoModelForMaskedLM, AutoModelForMaskGeneration, AutoModelForMultipleChoice, AutoModelForNextSentencePrediction, AutoModelForObjectDetection, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSemanticSegmentation, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForSpeechSeq2Seq, AutoModelForTableQuestionAnswering, AutoModelForTextEncoding, AutoModelForTextToSpectrogram, AutoModelForTextToWaveform, AutoModelForTokenClassification, AutoModelForUniversalSegmentation, AutoModelForVideoClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, AutoModelForZeroShotImageClassification, AutoModelForZeroShotObjectDetection, AutoModelWithLMHead try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_auto import TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_TEXT_ENCODING_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING, TFAutoModel, TFAutoModelForAudioClassification, TFAutoModelForCausalLM, TFAutoModelForDocumentQuestionAnswering, TFAutoModelForImageClassification, TFAutoModelForMaskedImageModeling, TFAutoModelForMaskedLM, TFAutoModelForMaskGeneration, TFAutoModelForMultipleChoice, TFAutoModelForNextSentencePrediction, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSemanticSegmentation, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForSpeechSeq2Seq, TFAutoModelForTableQuestionAnswering, TFAutoModelForTextEncoding, TFAutoModelForTokenClassification, TFAutoModelForVision2Seq, TFAutoModelForZeroShotImageClassification, TFAutoModelWithLMHead try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_auto import FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, 
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, FLAX_MODEL_FOR_MASKED_LM_MAPPING, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, FLAX_MODEL_FOR_PRETRAINING_MAPPING, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, FLAX_MODEL_MAPPING, FlaxAutoModel, FlaxAutoModelForCausalLM, FlaxAutoModelForImageClassification, FlaxAutoModelForMaskedLM, FlaxAutoModelForMultipleChoice, FlaxAutoModelForNextSentencePrediction, FlaxAutoModelForPreTraining, FlaxAutoModelForQuestionAnswering, FlaxAutoModelForSeq2SeqLM, FlaxAutoModelForSequenceClassification, FlaxAutoModelForSpeechSeq2Seq, FlaxAutoModelForTokenClassification, FlaxAutoModelForVision2Seq else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/auto/auto_factory.py """""" import copy import importlib import json import warnings from collections import OrderedDict from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...utils import CONFIG_NAME, cached_file, copy_func, extract_commit_hash, find_adapter_config_file, is_peft_available, logging, requires_backends from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings logger = logging.get_logger(__name__) CLASS_DOCSTRING = '\n This is a generic model class that will be instantiated as one of the model classes of the library when created\n with the [`~BaseAutoModelClass.from_pretrained`] class method or the [`~BaseAutoModelClass.from_config`] class\n method.\n\n This class cannot be instantiated directly using `__init__()` (throws an error).\n' FROM_CONFIG_DOCSTRING = '\n Instantiates one of the model classes of the library from a configuration.\n\n Note:\n Loading a model from its configuration file does **not** load the model weights. It only affects the\n model\'s configuration. Use [`~BaseAutoModelClass.from_pretrained`] to load the model weights.\n\n Args:\n config ([`PretrainedConfig`]):\n The model class to instantiate is selected based on the configuration class:\n\n List options\n attn_implementation (`str`, *optional*):\n The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), or `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)). By default, if available, SDPA will be used for torch>=2.1.1. 
The default is otherwise the manual `"eager"` implementation.\n\n Examples:\n\n ```python\n >>> from transformers import AutoConfig, BaseAutoModelClass\n\n >>> # Download configuration from huggingface.co and cache.\n >>> config = AutoConfig.from_pretrained("checkpoint_placeholder")\n >>> model = BaseAutoModelClass.from_config(config)\n ```\n' FROM_PRETRAINED_TORCH_DOCSTRING = '\n Instantiate one of the model classes of the library from a pretrained model.\n\n The model class to instantiate is selected based on the `model_type` property of the config object (either\n passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it\'s missing, by\n falling back to using pattern matching on `pretrained_model_name_or_path`:\n\n List options\n\n The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are\n deactivated). To train the model, you should first set it back in training mode with `model.train()`\n\n Args:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n Can be either:\n\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.\n - A path to a *directory* containing model weights saved using\n [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.\n - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In\n this case, `from_tf` should be set to `True` and a configuration object should be provided as\n `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a\n PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n model_args (additional positional arguments, *optional*):\n Will be passed along to the underlying model `__init__()` method.\n config ([`PretrainedConfig`], *optional*):\n Configuration for the model to use instead of an automatically loaded configuration. Configuration can\n be automatically loaded when:\n\n - The model is a model provided by the library (loaded with the *model id* string of a pretrained\n model).\n - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the\n save directory.\n - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a\n configuration JSON file named *config.json* is found in the directory.\n state_dict (*Dict[str, torch.Tensor]*, *optional*):\n A state dictionary to use instead of a state dictionary loaded from saved weights file.\n\n This option can be used if you want to create a model from a pretrained configuration but load your own\n weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and\n [`~PreTrainedModel.from_pretrained`] is not a simpler option.\n cache_dir (`str` or `os.PathLike`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n from_tf (`bool`, *optional*, defaults to `False`):\n Load the model weights from a TensorFlow checkpoint save file (see docstring of\n `pretrained_model_name_or_path` argument).\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download:\n Deprecated and ignored. 
All downloads are now resumed by default when possible.\n Will be removed in v5 of Transformers.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{\'http\': \'foo.bar:3128\',\n \'http://hostname\': \'foo.bar:4012\'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (e.g., not try downloading the model).\n revision (`str`, *optional*, defaults to `"main"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n trust_remote_code (`bool`, *optional*, defaults to `False`):\n Whether or not to allow for custom models defined on the Hub in their own modeling files. This option\n should only be set to `True` for repositories you trust and in which you have read the code, as it will\n execute code present on the Hub on your local machine.\n code_revision (`str`, *optional*, defaults to `"main"`):\n The specific revision to use for the code on the Hub, if the code leaves in a different repository than\n the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based\n system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier\n allowed by git.\n kwargs (additional keyword arguments, *optional*):\n Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,\n `output_attentions=True`). Behaves differently depending on whether a `config` is provided or\n automatically loaded:\n\n - If a configuration is provided with `config`, `**kwargs` will be directly passed to the\n underlying model\'s `__init__` method (we assume all relevant updates to the configuration have\n already been done)\n - If a configuration is not provided, `kwargs` will be first passed to the configuration class\n initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that\n corresponds to a configuration attribute will be used to override said attribute with the\n supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute\n will be passed to the underlying model\'s `__init__` function.\n\n Examples:\n\n ```python\n >>> from transformers import AutoConfig, BaseAutoModelClass\n\n >>> # Download model and configuration from huggingface.co and cache.\n >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")\n\n >>> # Update configuration during loading\n >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)\n >>> model.config.output_attentions\n True\n\n >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n >>> config = AutoConfig.from_pretrained("./tf_model/shortcut_placeholder_tf_model_config.json")\n >>> model = BaseAutoModelClass.from_pretrained(\n ... "./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index", from_tf=True, config=config\n ... 
)\n ```\n' FROM_PRETRAINED_TF_DOCSTRING = '\n Instantiate one of the model classes of the library from a pretrained model.\n\n The model class to instantiate is selected based on the `model_type` property of the config object (either\n passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it\'s missing, by\n falling back to using pattern matching on `pretrained_model_name_or_path`:\n\n List options\n\n Args:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n Can be either:\n\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.\n - A path to a *directory* containing model weights saved using\n [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.\n - A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this\n case, `from_pt` should be set to `True` and a configuration object should be provided as `config`\n argument. This loading path is slower than converting the PyTorch model in a TensorFlow model\n using the provided conversion scripts and loading the TensorFlow model afterwards.\n model_args (additional positional arguments, *optional*):\n Will be passed along to the underlying model `__init__()` method.\n config ([`PretrainedConfig`], *optional*):\n Configuration for the model to use instead of an automatically loaded configuration. Configuration can\n be automatically loaded when:\n\n - The model is a model provided by the library (loaded with the *model id* string of a pretrained\n model).\n - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the\n save directory.\n - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a\n configuration JSON file named *config.json* is found in the directory.\n cache_dir (`str` or `os.PathLike`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n from_pt (`bool`, *optional*, defaults to `False`):\n Load the model weights from a PyTorch checkpoint save file (see docstring of\n `pretrained_model_name_or_path` argument).\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download:\n Deprecated and ignored. All downloads are now resumed by default when possible.\n Will be removed in v5 of Transformers.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{\'http\': \'foo.bar:3128\',\n \'http://hostname\': \'foo.bar:4012\'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (e.g., not try downloading the model).\n revision (`str`, *optional*, defaults to `"main"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n trust_remote_code (`bool`, *optional*, defaults to `False`):\n Whether or not to allow for custom models defined on the Hub in their own modeling files. 
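The `trust_remote_code` / `code_revision` pair documented above is easiest to read with a concrete call. The following is an illustrative sketch only: the repository id is a placeholder rather than a real checkpoint, and it simply shows how the two arguments are passed to `from_pretrained` as described.

```python
from transformers import AutoModel

# Hypothetical Hub repository that ships its own modeling code. The remote code is
# only downloaded and executed because trust_remote_code=True is passed explicitly;
# code_revision pins which revision of that remote code gets used.
model = AutoModel.from_pretrained(
    "username/custom-model-with-remote-code",  # placeholder repo id, not a real checkpoint
    trust_remote_code=True,
    code_revision="main",
)
```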
This option\n should only be set to `True` for repositories you trust and in which you have read the code, as it will\n execute code present on the Hub on your local machine.\n code_revision (`str`, *optional*, defaults to `"main"`):\n The specific revision to use for the code on the Hub, if the code leaves in a different repository than\n the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based\n system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier\n allowed by git.\n kwargs (additional keyword arguments, *optional*):\n Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,\n `output_attentions=True`). Behaves differently depending on whether a `config` is provided or\n automatically loaded:\n\n - If a configuration is provided with `config`, `**kwargs` will be directly passed to the\n underlying model\'s `__init__` method (we assume all relevant updates to the configuration have\n already been done)\n - If a configuration is not provided, `kwargs` will be first passed to the configuration class\n initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that\n corresponds to a configuration attribute will be used to override said attribute with the\n supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute\n will be passed to the underlying model\'s `__init__` function.\n\n Examples:\n\n ```python\n >>> from transformers import AutoConfig, BaseAutoModelClass\n\n >>> # Download model and configuration from huggingface.co and cache.\n >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")\n\n >>> # Update configuration during loading\n >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)\n >>> model.config.output_attentions\n True\n\n >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)\n >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")\n >>> model = BaseAutoModelClass.from_pretrained(\n ... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config\n ... )\n ```\n' FROM_PRETRAINED_FLAX_DOCSTRING = '\n Instantiate one of the model classes of the library from a pretrained model.\n\n The model class to instantiate is selected based on the `model_type` property of the config object (either\n passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it\'s missing, by\n falling back to using pattern matching on `pretrained_model_name_or_path`:\n\n List options\n\n Args:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n Can be either:\n\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.\n - A path to a *directory* containing model weights saved using\n [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.\n - A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this\n case, `from_pt` should be set to `True` and a configuration object should be provided as `config`\n argument. 
This loading path is slower than converting the PyTorch model in a TensorFlow model\n using the provided conversion scripts and loading the TensorFlow model afterwards.\n model_args (additional positional arguments, *optional*):\n Will be passed along to the underlying model `__init__()` method.\n config ([`PretrainedConfig`], *optional*):\n Configuration for the model to use instead of an automatically loaded configuration. Configuration can\n be automatically loaded when:\n\n - The model is a model provided by the library (loaded with the *model id* string of a pretrained\n model).\n - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the\n save directory.\n - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a\n configuration JSON file named *config.json* is found in the directory.\n cache_dir (`str` or `os.PathLike`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n from_pt (`bool`, *optional*, defaults to `False`):\n Load the model weights from a PyTorch checkpoint save file (see docstring of\n `pretrained_model_name_or_path` argument).\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download:\n Deprecated and ignored. All downloads are now resumed by default when possible.\n Will be removed in v5 of Transformers.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{\'http\': \'foo.bar:3128\',\n \'http://hostname\': \'foo.bar:4012\'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (e.g., not try downloading the model).\n revision (`str`, *optional*, defaults to `"main"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n trust_remote_code (`bool`, *optional*, defaults to `False`):\n Whether or not to allow for custom models defined on the Hub in their own modeling files. This option\n should only be set to `True` for repositories you trust and in which you have read the code, as it will\n execute code present on the Hub on your local machine.\n code_revision (`str`, *optional*, defaults to `"main"`):\n The specific revision to use for the code on the Hub, if the code leaves in a different repository than\n the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based\n system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier\n allowed by git.\n kwargs (additional keyword arguments, *optional*):\n Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,\n `output_attentions=True`). 
Behaves differently depending on whether a `config` is provided or\n automatically loaded:\n\n - If a configuration is provided with `config`, `**kwargs` will be directly passed to the\n underlying model\'s `__init__` method (we assume all relevant updates to the configuration have\n already been done)\n - If a configuration is not provided, `kwargs` will be first passed to the configuration class\n initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that\n corresponds to a configuration attribute will be used to override said attribute with the\n supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute\n will be passed to the underlying model\'s `__init__` function.\n\n Examples:\n\n ```python\n >>> from transformers import AutoConfig, BaseAutoModelClass\n\n >>> # Download model and configuration from huggingface.co and cache.\n >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")\n\n >>> # Update configuration during loading\n >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)\n >>> model.config.output_attentions\n True\n\n >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)\n >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")\n >>> model = BaseAutoModelClass.from_pretrained(\n ... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config\n ... )\n ```\n' def _get_model_class(config, model_mapping): supported_models = model_mapping[type(config)] if not isinstance(supported_models, (list, tuple)): return supported_models name_to_model = {model.__name__: model for model in supported_models} architectures = getattr(config, 'architectures', []) for arch in architectures: if arch in name_to_model: return name_to_model[arch] elif f'TF{arch}' in name_to_model: return name_to_model[f'TF{arch}'] elif f'Flax{arch}' in name_to_model: return name_to_model[f'Flax{arch}'] return supported_models[0] class _BaseAutoModelClass: _model_mapping = None def __init__(self, *args, **kwargs): raise EnvironmentError(f'{self.__class__.__name__} is designed to be instantiated using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or `{self.__class__.__name__}.from_config(config)` methods.') @classmethod def from_config(cls, config, **kwargs): trust_remote_code = kwargs.pop('trust_remote_code', None) has_remote_code = hasattr(config, 'auto_map') and cls.__name__ in config.auto_map has_local_code = type(config) in cls._model_mapping.keys() trust_remote_code = resolve_trust_remote_code(trust_remote_code, config._name_or_path, has_local_code, has_remote_code) if has_remote_code and trust_remote_code: class_ref = config.auto_map[cls.__name__] if '--' in class_ref: (repo_id, class_ref) = class_ref.split('--') else: repo_id = config.name_or_path model_class = get_class_from_dynamic_module(class_ref, repo_id, **kwargs) cls.register(config.__class__, model_class, exist_ok=True) _ = kwargs.pop('code_revision', None) return model_class._from_config(config, **kwargs) elif type(config) in cls._model_mapping.keys(): model_class = _get_model_class(config, cls._model_mapping) return model_class._from_config(config, **kwargs) raise ValueError(f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\nModel type should be one of {', '.join((c.__name__ for c in cls._model_mapping.keys()))}.") @classmethod def from_pretrained(cls, 
pretrained_model_name_or_path, *model_args, **kwargs): config = kwargs.pop('config', None) trust_remote_code = kwargs.pop('trust_remote_code', None) kwargs['_from_auto'] = True hub_kwargs_names = ['cache_dir', 'force_download', 'local_files_only', 'proxies', 'resume_download', 'revision', 'subfolder', 'use_auth_token', 'token'] hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs} code_revision = kwargs.pop('code_revision', None) commit_hash = kwargs.pop('_commit_hash', None) adapter_kwargs = kwargs.pop('adapter_kwargs', None) token = hub_kwargs.pop('token', None) use_auth_token = hub_kwargs.pop('use_auth_token', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') token = use_auth_token if token is not None: hub_kwargs['token'] = token if commit_hash is None: if not isinstance(config, PretrainedConfig): resolved_config_file = cached_file(pretrained_model_name_or_path, CONFIG_NAME, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, **hub_kwargs) commit_hash = extract_commit_hash(resolved_config_file, commit_hash) else: commit_hash = getattr(config, '_commit_hash', None) if is_peft_available(): if adapter_kwargs is None: adapter_kwargs = {} if token is not None: adapter_kwargs['token'] = token maybe_adapter_path = find_adapter_config_file(pretrained_model_name_or_path, _commit_hash=commit_hash, **adapter_kwargs) if maybe_adapter_path is not None: with open(maybe_adapter_path, 'r', encoding='utf-8') as f: adapter_config = json.load(f) adapter_kwargs['_adapter_model_path'] = pretrained_model_name_or_path pretrained_model_name_or_path = adapter_config['base_model_name_or_path'] if not isinstance(config, PretrainedConfig): kwargs_orig = copy.deepcopy(kwargs) if kwargs.get('torch_dtype', None) == 'auto': _ = kwargs.pop('torch_dtype') if kwargs.get('quantization_config', None) is not None: _ = kwargs.pop('quantization_config') (config, kwargs) = AutoConfig.from_pretrained(pretrained_model_name_or_path, return_unused_kwargs=True, trust_remote_code=trust_remote_code, code_revision=code_revision, _commit_hash=commit_hash, **hub_kwargs, **kwargs) if kwargs_orig.get('torch_dtype', None) == 'auto': kwargs['torch_dtype'] = 'auto' if kwargs_orig.get('quantization_config', None) is not None: kwargs['quantization_config'] = kwargs_orig['quantization_config'] has_remote_code = hasattr(config, 'auto_map') and cls.__name__ in config.auto_map has_local_code = type(config) in cls._model_mapping.keys() trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code) kwargs['adapter_kwargs'] = adapter_kwargs if has_remote_code and trust_remote_code: class_ref = config.auto_map[cls.__name__] model_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs) _ = hub_kwargs.pop('code_revision', None) cls.register(config.__class__, model_class, exist_ok=True) return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs) elif type(config) in cls._model_mapping.keys(): model_class = _get_model_class(config, cls._model_mapping) return 
model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs) raise ValueError(f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\nModel type should be one of {', '.join((c.__name__ for c in cls._model_mapping.keys()))}.") @classmethod def register(cls, config_class, model_class, exist_ok=False): if hasattr(model_class, 'config_class') and str(model_class.config_class) != str(config_class): raise ValueError(f'The model class you are passing has a `config_class` attribute that is not consistent with the config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix one of those so they match!') cls._model_mapping.register(config_class, model_class, exist_ok=exist_ok) class _BaseAutoBackboneClass(_BaseAutoModelClass): _model_mapping = None @classmethod def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): requires_backends(cls, ['vision', 'timm']) from ...models.timm_backbone import TimmBackboneConfig config = kwargs.pop('config', TimmBackboneConfig()) if kwargs.get('out_features', None) is not None: raise ValueError('Cannot specify `out_features` for timm backbones') if kwargs.get('output_loading_info', False): raise ValueError('Cannot specify `output_loading_info=True` when loading from timm') num_channels = kwargs.pop('num_channels', config.num_channels) features_only = kwargs.pop('features_only', config.features_only) use_pretrained_backbone = kwargs.pop('use_pretrained_backbone', config.use_pretrained_backbone) out_indices = kwargs.pop('out_indices', config.out_indices) config = TimmBackboneConfig(backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices) return super().from_config(config, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): use_timm_backbone = kwargs.pop('use_timm_backbone', False) if use_timm_backbone: return cls._load_timm_backbone_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) def insert_head_doc(docstring, head_doc=''): if len(head_doc) > 0: return docstring.replace('one of the model classes of the library ', f'one of the model classes of the library (with a {head_doc} head) ') return docstring.replace('one of the model classes of the library ', 'one of the base model classes of the library ') def auto_class_update(cls, checkpoint_for_example='google-bert/bert-base-cased', head_doc=''): model_mapping = cls._model_mapping name = cls.__name__ class_docstring = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc) cls.__doc__ = class_docstring.replace('BaseAutoModelClass', name) from_config = copy_func(_BaseAutoModelClass.from_config) from_config_docstring = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc) from_config_docstring = from_config_docstring.replace('BaseAutoModelClass', name) from_config_docstring = from_config_docstring.replace('checkpoint_placeholder', checkpoint_for_example) from_config.__doc__ = from_config_docstring from_config = replace_list_option_in_docstrings(model_mapping._model_mapping, use_model_types=False)(from_config) cls.from_config = classmethod(from_config) if name.startswith('TF'): from_pretrained_docstring = FROM_PRETRAINED_TF_DOCSTRING elif name.startswith('Flax'): from_pretrained_docstring = 
FROM_PRETRAINED_FLAX_DOCSTRING else: from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained) from_pretrained_docstring = insert_head_doc(from_pretrained_docstring, head_doc=head_doc) from_pretrained_docstring = from_pretrained_docstring.replace('BaseAutoModelClass', name) from_pretrained_docstring = from_pretrained_docstring.replace('checkpoint_placeholder', checkpoint_for_example) shortcut = checkpoint_for_example.split('/')[-1].split('-')[0] from_pretrained_docstring = from_pretrained_docstring.replace('shortcut_placeholder', shortcut) from_pretrained.__doc__ = from_pretrained_docstring from_pretrained = replace_list_option_in_docstrings(model_mapping._model_mapping)(from_pretrained) cls.from_pretrained = classmethod(from_pretrained) return cls def get_values(model_mapping): result = [] for model in model_mapping.values(): if isinstance(model, (list, tuple)): result += list(model) else: result.append(model) return result def getattribute_from_module(module, attr): if attr is None: return None if isinstance(attr, tuple): return tuple((getattribute_from_module(module, a) for a in attr)) if hasattr(module, attr): return getattr(module, attr) transformers_module = importlib.import_module('transformers') if module != transformers_module: try: return getattribute_from_module(transformers_module, attr) except ValueError: raise ValueError(f'Could not find {attr} neither in {module} nor in {transformers_module}!') else: raise ValueError(f'Could not find {attr} in {transformers_module}!') class _LazyAutoMapping(OrderedDict): def __init__(self, config_mapping, model_mapping): self._config_mapping = config_mapping self._reverse_config_mapping = {v: k for (k, v) in config_mapping.items()} self._model_mapping = model_mapping self._model_mapping._model_mapping = self self._extra_content = {} self._modules = {} def __len__(self): common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys()) return len(common_keys) + len(self._extra_content) def __getitem__(self, key): if key in self._extra_content: return self._extra_content[key] model_type = self._reverse_config_mapping[key.__name__] if model_type in self._model_mapping: model_name = self._model_mapping[model_type] return self._load_attr_from_module(model_type, model_name) model_types = [k for (k, v) in self._config_mapping.items() if v == key.__name__] for mtype in model_types: if mtype in self._model_mapping: model_name = self._model_mapping[mtype] return self._load_attr_from_module(mtype, model_name) raise KeyError(key) def _load_attr_from_module(self, model_type, attr): module_name = model_type_to_module_name(model_type) if module_name not in self._modules: self._modules[module_name] = importlib.import_module(f'.{module_name}', 'transformers.models') return getattribute_from_module(self._modules[module_name], attr) def keys(self): mapping_keys = [self._load_attr_from_module(key, name) for (key, name) in self._config_mapping.items() if key in self._model_mapping.keys()] return mapping_keys + list(self._extra_content.keys()) def get(self, key, default): try: return self.__getitem__(key) except KeyError: return default def __bool__(self): return bool(self.keys()) def values(self): mapping_values = [self._load_attr_from_module(key, name) for (key, name) in self._model_mapping.items() if key in self._config_mapping.keys()] return mapping_values + list(self._extra_content.values()) def items(self): mapping_items = [(self._load_attr_from_module(key, 
self._config_mapping[key]), self._load_attr_from_module(key, self._model_mapping[key])) for key in self._model_mapping.keys() if key in self._config_mapping.keys()] return mapping_items + list(self._extra_content.items()) def __iter__(self): return iter(self.keys()) def __contains__(self, item): if item in self._extra_content: return True if not hasattr(item, '__name__') or item.__name__ not in self._reverse_config_mapping: return False model_type = self._reverse_config_mapping[item.__name__] return model_type in self._model_mapping def register(self, key, value, exist_ok=False): if hasattr(key, '__name__') and key.__name__ in self._reverse_config_mapping: model_type = self._reverse_config_mapping[key.__name__] if model_type in self._model_mapping.keys() and (not exist_ok): raise ValueError(f"'{key}' is already used by a Transformers model.") self._extra_content[key] = value # File: transformers-main/src/transformers/models/auto/configuration_auto.py """""" import importlib import os import re import warnings from collections import OrderedDict from typing import List, Union from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...utils import CONFIG_NAME, logging logger = logging.get_logger(__name__) CONFIG_MAPPING_NAMES = OrderedDict([('albert', 'AlbertConfig'), ('align', 'AlignConfig'), ('altclip', 'AltCLIPConfig'), ('audio-spectrogram-transformer', 'ASTConfig'), ('autoformer', 'AutoformerConfig'), ('bark', 'BarkConfig'), ('bart', 'BartConfig'), ('beit', 'BeitConfig'), ('bert', 'BertConfig'), ('bert-generation', 'BertGenerationConfig'), ('big_bird', 'BigBirdConfig'), ('bigbird_pegasus', 'BigBirdPegasusConfig'), ('biogpt', 'BioGptConfig'), ('bit', 'BitConfig'), ('blenderbot', 'BlenderbotConfig'), ('blenderbot-small', 'BlenderbotSmallConfig'), ('blip', 'BlipConfig'), ('blip-2', 'Blip2Config'), ('bloom', 'BloomConfig'), ('bridgetower', 'BridgeTowerConfig'), ('bros', 'BrosConfig'), ('camembert', 'CamembertConfig'), ('canine', 'CanineConfig'), ('chameleon', 'ChameleonConfig'), ('chinese_clip', 'ChineseCLIPConfig'), ('chinese_clip_vision_model', 'ChineseCLIPVisionConfig'), ('clap', 'ClapConfig'), ('clip', 'CLIPConfig'), ('clip_vision_model', 'CLIPVisionConfig'), ('clipseg', 'CLIPSegConfig'), ('clvp', 'ClvpConfig'), ('code_llama', 'LlamaConfig'), ('codegen', 'CodeGenConfig'), ('cohere', 'CohereConfig'), ('conditional_detr', 'ConditionalDetrConfig'), ('convbert', 'ConvBertConfig'), ('convnext', 'ConvNextConfig'), ('convnextv2', 'ConvNextV2Config'), ('cpmant', 'CpmAntConfig'), ('ctrl', 'CTRLConfig'), ('cvt', 'CvtConfig'), ('dac', 'DacConfig'), ('data2vec-audio', 'Data2VecAudioConfig'), ('data2vec-text', 'Data2VecTextConfig'), ('data2vec-vision', 'Data2VecVisionConfig'), ('dbrx', 'DbrxConfig'), ('deberta', 'DebertaConfig'), ('deberta-v2', 'DebertaV2Config'), ('decision_transformer', 'DecisionTransformerConfig'), ('deformable_detr', 'DeformableDetrConfig'), ('deit', 'DeiTConfig'), ('depth_anything', 'DepthAnythingConfig'), ('deta', 'DetaConfig'), ('detr', 'DetrConfig'), ('dinat', 'DinatConfig'), ('dinov2', 'Dinov2Config'), ('distilbert', 'DistilBertConfig'), ('donut-swin', 'DonutSwinConfig'), ('dpr', 'DPRConfig'), ('dpt', 'DPTConfig'), ('efficientformer', 'EfficientFormerConfig'), ('efficientnet', 'EfficientNetConfig'), ('electra', 'ElectraConfig'), ('encodec', 'EncodecConfig'), ('encoder-decoder', 'EncoderDecoderConfig'), ('ernie', 'ErnieConfig'), ('ernie_m', 'ErnieMConfig'), ('esm', 'EsmConfig'), 
('falcon', 'FalconConfig'), ('falcon_mamba', 'FalconMambaConfig'), ('fastspeech2_conformer', 'FastSpeech2ConformerConfig'), ('flaubert', 'FlaubertConfig'), ('flava', 'FlavaConfig'), ('fnet', 'FNetConfig'), ('focalnet', 'FocalNetConfig'), ('fsmt', 'FSMTConfig'), ('funnel', 'FunnelConfig'), ('fuyu', 'FuyuConfig'), ('gemma', 'GemmaConfig'), ('gemma2', 'Gemma2Config'), ('git', 'GitConfig'), ('glpn', 'GLPNConfig'), ('gpt-sw3', 'GPT2Config'), ('gpt2', 'GPT2Config'), ('gpt_bigcode', 'GPTBigCodeConfig'), ('gpt_neo', 'GPTNeoConfig'), ('gpt_neox', 'GPTNeoXConfig'), ('gpt_neox_japanese', 'GPTNeoXJapaneseConfig'), ('gptj', 'GPTJConfig'), ('gptsan-japanese', 'GPTSanJapaneseConfig'), ('granite', 'GraniteConfig'), ('graphormer', 'GraphormerConfig'), ('grounding-dino', 'GroundingDinoConfig'), ('groupvit', 'GroupViTConfig'), ('hiera', 'HieraConfig'), ('hubert', 'HubertConfig'), ('ibert', 'IBertConfig'), ('idefics', 'IdeficsConfig'), ('idefics2', 'Idefics2Config'), ('imagegpt', 'ImageGPTConfig'), ('informer', 'InformerConfig'), ('instructblip', 'InstructBlipConfig'), ('instructblipvideo', 'InstructBlipVideoConfig'), ('jamba', 'JambaConfig'), ('jetmoe', 'JetMoeConfig'), ('jukebox', 'JukeboxConfig'), ('kosmos-2', 'Kosmos2Config'), ('layoutlm', 'LayoutLMConfig'), ('layoutlmv2', 'LayoutLMv2Config'), ('layoutlmv3', 'LayoutLMv3Config'), ('led', 'LEDConfig'), ('levit', 'LevitConfig'), ('lilt', 'LiltConfig'), ('llama', 'LlamaConfig'), ('llava', 'LlavaConfig'), ('llava_next', 'LlavaNextConfig'), ('llava_next_video', 'LlavaNextVideoConfig'), ('llava_onevision', 'LlavaOnevisionConfig'), ('longformer', 'LongformerConfig'), ('longt5', 'LongT5Config'), ('luke', 'LukeConfig'), ('lxmert', 'LxmertConfig'), ('m2m_100', 'M2M100Config'), ('mamba', 'MambaConfig'), ('mamba2', 'Mamba2Config'), ('marian', 'MarianConfig'), ('markuplm', 'MarkupLMConfig'), ('mask2former', 'Mask2FormerConfig'), ('maskformer', 'MaskFormerConfig'), ('maskformer-swin', 'MaskFormerSwinConfig'), ('mbart', 'MBartConfig'), ('mctct', 'MCTCTConfig'), ('mega', 'MegaConfig'), ('megatron-bert', 'MegatronBertConfig'), ('mgp-str', 'MgpstrConfig'), ('mistral', 'MistralConfig'), ('mixtral', 'MixtralConfig'), ('mobilebert', 'MobileBertConfig'), ('mobilenet_v1', 'MobileNetV1Config'), ('mobilenet_v2', 'MobileNetV2Config'), ('mobilevit', 'MobileViTConfig'), ('mobilevitv2', 'MobileViTV2Config'), ('mpnet', 'MPNetConfig'), ('mpt', 'MptConfig'), ('mra', 'MraConfig'), ('mt5', 'MT5Config'), ('musicgen', 'MusicgenConfig'), ('musicgen_melody', 'MusicgenMelodyConfig'), ('mvp', 'MvpConfig'), ('nat', 'NatConfig'), ('nemotron', 'NemotronConfig'), ('nezha', 'NezhaConfig'), ('nllb-moe', 'NllbMoeConfig'), ('nougat', 'VisionEncoderDecoderConfig'), ('nystromformer', 'NystromformerConfig'), ('olmo', 'OlmoConfig'), ('olmoe', 'OlmoeConfig'), ('oneformer', 'OneFormerConfig'), ('open-llama', 'OpenLlamaConfig'), ('openai-gpt', 'OpenAIGPTConfig'), ('opt', 'OPTConfig'), ('owlv2', 'Owlv2Config'), ('owlvit', 'OwlViTConfig'), ('paligemma', 'PaliGemmaConfig'), ('patchtsmixer', 'PatchTSMixerConfig'), ('patchtst', 'PatchTSTConfig'), ('pegasus', 'PegasusConfig'), ('pegasus_x', 'PegasusXConfig'), ('perceiver', 'PerceiverConfig'), ('persimmon', 'PersimmonConfig'), ('phi', 'PhiConfig'), ('phi3', 'Phi3Config'), ('pix2struct', 'Pix2StructConfig'), ('pixtral', 'PixtralVisionConfig'), ('plbart', 'PLBartConfig'), ('poolformer', 'PoolFormerConfig'), ('pop2piano', 'Pop2PianoConfig'), ('prophetnet', 'ProphetNetConfig'), ('pvt', 'PvtConfig'), ('pvt_v2', 'PvtV2Config'), ('qdqbert', 'QDQBertConfig'), ('qwen2', 
'Qwen2Config'), ('qwen2_audio', 'Qwen2AudioConfig'), ('qwen2_audio_encoder', 'Qwen2AudioEncoderConfig'), ('qwen2_moe', 'Qwen2MoeConfig'), ('qwen2_vl', 'Qwen2VLConfig'), ('rag', 'RagConfig'), ('realm', 'RealmConfig'), ('recurrent_gemma', 'RecurrentGemmaConfig'), ('reformer', 'ReformerConfig'), ('regnet', 'RegNetConfig'), ('rembert', 'RemBertConfig'), ('resnet', 'ResNetConfig'), ('retribert', 'RetriBertConfig'), ('roberta', 'RobertaConfig'), ('roberta-prelayernorm', 'RobertaPreLayerNormConfig'), ('roc_bert', 'RoCBertConfig'), ('roformer', 'RoFormerConfig'), ('rt_detr', 'RTDetrConfig'), ('rt_detr_resnet', 'RTDetrResNetConfig'), ('rwkv', 'RwkvConfig'), ('sam', 'SamConfig'), ('seamless_m4t', 'SeamlessM4TConfig'), ('seamless_m4t_v2', 'SeamlessM4Tv2Config'), ('segformer', 'SegformerConfig'), ('seggpt', 'SegGptConfig'), ('sew', 'SEWConfig'), ('sew-d', 'SEWDConfig'), ('siglip', 'SiglipConfig'), ('siglip_vision_model', 'SiglipVisionConfig'), ('speech-encoder-decoder', 'SpeechEncoderDecoderConfig'), ('speech_to_text', 'Speech2TextConfig'), ('speech_to_text_2', 'Speech2Text2Config'), ('speecht5', 'SpeechT5Config'), ('splinter', 'SplinterConfig'), ('squeezebert', 'SqueezeBertConfig'), ('stablelm', 'StableLmConfig'), ('starcoder2', 'Starcoder2Config'), ('superpoint', 'SuperPointConfig'), ('swiftformer', 'SwiftFormerConfig'), ('swin', 'SwinConfig'), ('swin2sr', 'Swin2SRConfig'), ('swinv2', 'Swinv2Config'), ('switch_transformers', 'SwitchTransformersConfig'), ('t5', 'T5Config'), ('table-transformer', 'TableTransformerConfig'), ('tapas', 'TapasConfig'), ('time_series_transformer', 'TimeSeriesTransformerConfig'), ('timesformer', 'TimesformerConfig'), ('timm_backbone', 'TimmBackboneConfig'), ('trajectory_transformer', 'TrajectoryTransformerConfig'), ('transfo-xl', 'TransfoXLConfig'), ('trocr', 'TrOCRConfig'), ('tvlt', 'TvltConfig'), ('tvp', 'TvpConfig'), ('udop', 'UdopConfig'), ('umt5', 'UMT5Config'), ('unispeech', 'UniSpeechConfig'), ('unispeech-sat', 'UniSpeechSatConfig'), ('univnet', 'UnivNetConfig'), ('upernet', 'UperNetConfig'), ('van', 'VanConfig'), ('video_llava', 'VideoLlavaConfig'), ('videomae', 'VideoMAEConfig'), ('vilt', 'ViltConfig'), ('vipllava', 'VipLlavaConfig'), ('vision-encoder-decoder', 'VisionEncoderDecoderConfig'), ('vision-text-dual-encoder', 'VisionTextDualEncoderConfig'), ('visual_bert', 'VisualBertConfig'), ('vit', 'ViTConfig'), ('vit_hybrid', 'ViTHybridConfig'), ('vit_mae', 'ViTMAEConfig'), ('vit_msn', 'ViTMSNConfig'), ('vitdet', 'VitDetConfig'), ('vitmatte', 'VitMatteConfig'), ('vits', 'VitsConfig'), ('vivit', 'VivitConfig'), ('wav2vec2', 'Wav2Vec2Config'), ('wav2vec2-bert', 'Wav2Vec2BertConfig'), ('wav2vec2-conformer', 'Wav2Vec2ConformerConfig'), ('wavlm', 'WavLMConfig'), ('whisper', 'WhisperConfig'), ('xclip', 'XCLIPConfig'), ('xglm', 'XGLMConfig'), ('xlm', 'XLMConfig'), ('xlm-prophetnet', 'XLMProphetNetConfig'), ('xlm-roberta', 'XLMRobertaConfig'), ('xlm-roberta-xl', 'XLMRobertaXLConfig'), ('xlnet', 'XLNetConfig'), ('xmod', 'XmodConfig'), ('yolos', 'YolosConfig'), ('yoso', 'YosoConfig'), ('zoedepth', 'ZoeDepthConfig')]) MODEL_NAMES_MAPPING = OrderedDict([('albert', 'ALBERT'), ('align', 'ALIGN'), ('altclip', 'AltCLIP'), ('audio-spectrogram-transformer', 'Audio Spectrogram Transformer'), ('autoformer', 'Autoformer'), ('bark', 'Bark'), ('bart', 'BART'), ('barthez', 'BARThez'), ('bartpho', 'BARTpho'), ('beit', 'BEiT'), ('bert', 'BERT'), ('bert-generation', 'Bert Generation'), ('bert-japanese', 'BertJapanese'), ('bertweet', 'BERTweet'), ('big_bird', 'BigBird'), ('bigbird_pegasus', 
'BigBird-Pegasus'), ('biogpt', 'BioGpt'), ('bit', 'BiT'), ('blenderbot', 'Blenderbot'), ('blenderbot-small', 'BlenderbotSmall'), ('blip', 'BLIP'), ('blip-2', 'BLIP-2'), ('bloom', 'BLOOM'), ('bort', 'BORT'), ('bridgetower', 'BridgeTower'), ('bros', 'BROS'), ('byt5', 'ByT5'), ('camembert', 'CamemBERT'), ('canine', 'CANINE'), ('chameleon', 'Chameleon'), ('chinese_clip', 'Chinese-CLIP'), ('chinese_clip_vision_model', 'ChineseCLIPVisionModel'), ('clap', 'CLAP'), ('clip', 'CLIP'), ('clip_vision_model', 'CLIPVisionModel'), ('clipseg', 'CLIPSeg'), ('clvp', 'CLVP'), ('code_llama', 'CodeLlama'), ('codegen', 'CodeGen'), ('cohere', 'Cohere'), ('conditional_detr', 'Conditional DETR'), ('convbert', 'ConvBERT'), ('convnext', 'ConvNeXT'), ('convnextv2', 'ConvNeXTV2'), ('cpm', 'CPM'), ('cpmant', 'CPM-Ant'), ('ctrl', 'CTRL'), ('cvt', 'CvT'), ('dac', 'DAC'), ('data2vec-audio', 'Data2VecAudio'), ('data2vec-text', 'Data2VecText'), ('data2vec-vision', 'Data2VecVision'), ('dbrx', 'DBRX'), ('deberta', 'DeBERTa'), ('deberta-v2', 'DeBERTa-v2'), ('decision_transformer', 'Decision Transformer'), ('deformable_detr', 'Deformable DETR'), ('deit', 'DeiT'), ('deplot', 'DePlot'), ('depth_anything', 'Depth Anything'), ('depth_anything_v2', 'Depth Anything V2'), ('deta', 'DETA'), ('detr', 'DETR'), ('dialogpt', 'DialoGPT'), ('dinat', 'DiNAT'), ('dinov2', 'DINOv2'), ('distilbert', 'DistilBERT'), ('dit', 'DiT'), ('donut-swin', 'DonutSwin'), ('dpr', 'DPR'), ('dpt', 'DPT'), ('efficientformer', 'EfficientFormer'), ('efficientnet', 'EfficientNet'), ('electra', 'ELECTRA'), ('encodec', 'EnCodec'), ('encoder-decoder', 'Encoder decoder'), ('ernie', 'ERNIE'), ('ernie_m', 'ErnieM'), ('esm', 'ESM'), ('falcon', 'Falcon'), ('falcon_mamba', 'FalconMamba'), ('fastspeech2_conformer', 'FastSpeech2Conformer'), ('flan-t5', 'FLAN-T5'), ('flan-ul2', 'FLAN-UL2'), ('flaubert', 'FlauBERT'), ('flava', 'FLAVA'), ('fnet', 'FNet'), ('focalnet', 'FocalNet'), ('fsmt', 'FairSeq Machine-Translation'), ('funnel', 'Funnel Transformer'), ('fuyu', 'Fuyu'), ('gemma', 'Gemma'), ('gemma2', 'Gemma2'), ('git', 'GIT'), ('glpn', 'GLPN'), ('gpt-sw3', 'GPT-Sw3'), ('gpt2', 'OpenAI GPT-2'), ('gpt_bigcode', 'GPTBigCode'), ('gpt_neo', 'GPT Neo'), ('gpt_neox', 'GPT NeoX'), ('gpt_neox_japanese', 'GPT NeoX Japanese'), ('gptj', 'GPT-J'), ('gptsan-japanese', 'GPTSAN-japanese'), ('granite', 'Granite'), ('graphormer', 'Graphormer'), ('grounding-dino', 'Grounding DINO'), ('groupvit', 'GroupViT'), ('herbert', 'HerBERT'), ('hiera', 'Hiera'), ('hubert', 'Hubert'), ('ibert', 'I-BERT'), ('idefics', 'IDEFICS'), ('idefics2', 'Idefics2'), ('imagegpt', 'ImageGPT'), ('informer', 'Informer'), ('instructblip', 'InstructBLIP'), ('instructblipvideo', 'InstructBlipVideo'), ('jamba', 'Jamba'), ('jetmoe', 'JetMoe'), ('jukebox', 'Jukebox'), ('kosmos-2', 'KOSMOS-2'), ('layoutlm', 'LayoutLM'), ('layoutlmv2', 'LayoutLMv2'), ('layoutlmv3', 'LayoutLMv3'), ('layoutxlm', 'LayoutXLM'), ('led', 'LED'), ('levit', 'LeViT'), ('lilt', 'LiLT'), ('llama', 'LLaMA'), ('llama2', 'Llama2'), ('llama3', 'Llama3'), ('llava', 'LLaVa'), ('llava_next', 'LLaVA-NeXT'), ('llava_next_video', 'LLaVa-NeXT-Video'), ('llava_onevision', 'LLaVA-Onevision'), ('longformer', 'Longformer'), ('longt5', 'LongT5'), ('luke', 'LUKE'), ('lxmert', 'LXMERT'), ('m2m_100', 'M2M100'), ('madlad-400', 'MADLAD-400'), ('mamba', 'Mamba'), ('mamba2', 'mamba2'), ('marian', 'Marian'), ('markuplm', 'MarkupLM'), ('mask2former', 'Mask2Former'), ('maskformer', 'MaskFormer'), ('maskformer-swin', 'MaskFormerSwin'), ('matcha', 'MatCha'), ('mbart', 'mBART'), 
('mbart50', 'mBART-50'), ('mctct', 'M-CTC-T'), ('mega', 'MEGA'), ('megatron-bert', 'Megatron-BERT'), ('megatron_gpt2', 'Megatron-GPT2'), ('mgp-str', 'MGP-STR'), ('mistral', 'Mistral'), ('mixtral', 'Mixtral'), ('mluke', 'mLUKE'), ('mms', 'MMS'), ('mobilebert', 'MobileBERT'), ('mobilenet_v1', 'MobileNetV1'), ('mobilenet_v2', 'MobileNetV2'), ('mobilevit', 'MobileViT'), ('mobilevitv2', 'MobileViTV2'), ('mpnet', 'MPNet'), ('mpt', 'MPT'), ('mra', 'MRA'), ('mt5', 'MT5'), ('musicgen', 'MusicGen'), ('musicgen_melody', 'MusicGen Melody'), ('mvp', 'MVP'), ('nat', 'NAT'), ('nemotron', 'Nemotron'), ('nezha', 'Nezha'), ('nllb', 'NLLB'), ('nllb-moe', 'NLLB-MOE'), ('nougat', 'Nougat'), ('nystromformer', 'Nyströmformer'), ('olmo', 'OLMo'), ('olmoe', 'OLMoE'), ('oneformer', 'OneFormer'), ('open-llama', 'OpenLlama'), ('openai-gpt', 'OpenAI GPT'), ('opt', 'OPT'), ('owlv2', 'OWLv2'), ('owlvit', 'OWL-ViT'), ('paligemma', 'PaliGemma'), ('patchtsmixer', 'PatchTSMixer'), ('patchtst', 'PatchTST'), ('pegasus', 'Pegasus'), ('pegasus_x', 'PEGASUS-X'), ('perceiver', 'Perceiver'), ('persimmon', 'Persimmon'), ('phi', 'Phi'), ('phi3', 'Phi3'), ('phobert', 'PhoBERT'), ('pix2struct', 'Pix2Struct'), ('pixtral', 'Pixtral'), ('plbart', 'PLBart'), ('poolformer', 'PoolFormer'), ('pop2piano', 'Pop2Piano'), ('prophetnet', 'ProphetNet'), ('pvt', 'PVT'), ('pvt_v2', 'PVTv2'), ('qdqbert', 'QDQBert'), ('qwen2', 'Qwen2'), ('qwen2_audio', 'Qwen2Audio'), ('qwen2_audio_encoder', 'Qwen2AudioEncoder'), ('qwen2_moe', 'Qwen2MoE'), ('qwen2_vl', 'Qwen2VL'), ('rag', 'RAG'), ('realm', 'REALM'), ('recurrent_gemma', 'RecurrentGemma'), ('reformer', 'Reformer'), ('regnet', 'RegNet'), ('rembert', 'RemBERT'), ('resnet', 'ResNet'), ('retribert', 'RetriBERT'), ('roberta', 'RoBERTa'), ('roberta-prelayernorm', 'RoBERTa-PreLayerNorm'), ('roc_bert', 'RoCBert'), ('roformer', 'RoFormer'), ('rt_detr', 'RT-DETR'), ('rt_detr_resnet', 'RT-DETR-ResNet'), ('rwkv', 'RWKV'), ('sam', 'SAM'), ('seamless_m4t', 'SeamlessM4T'), ('seamless_m4t_v2', 'SeamlessM4Tv2'), ('segformer', 'SegFormer'), ('seggpt', 'SegGPT'), ('sew', 'SEW'), ('sew-d', 'SEW-D'), ('siglip', 'SigLIP'), ('siglip_vision_model', 'SiglipVisionModel'), ('speech-encoder-decoder', 'Speech Encoder decoder'), ('speech_to_text', 'Speech2Text'), ('speech_to_text_2', 'Speech2Text2'), ('speecht5', 'SpeechT5'), ('splinter', 'Splinter'), ('squeezebert', 'SqueezeBERT'), ('stablelm', 'StableLm'), ('starcoder2', 'Starcoder2'), ('superpoint', 'SuperPoint'), ('swiftformer', 'SwiftFormer'), ('swin', 'Swin Transformer'), ('swin2sr', 'Swin2SR'), ('swinv2', 'Swin Transformer V2'), ('switch_transformers', 'SwitchTransformers'), ('t5', 'T5'), ('t5v1.1', 'T5v1.1'), ('table-transformer', 'Table Transformer'), ('tapas', 'TAPAS'), ('tapex', 'TAPEX'), ('time_series_transformer', 'Time Series Transformer'), ('timesformer', 'TimeSformer'), ('timm_backbone', 'TimmBackbone'), ('trajectory_transformer', 'Trajectory Transformer'), ('transfo-xl', 'Transformer-XL'), ('trocr', 'TrOCR'), ('tvlt', 'TVLT'), ('tvp', 'TVP'), ('udop', 'UDOP'), ('ul2', 'UL2'), ('umt5', 'UMT5'), ('unispeech', 'UniSpeech'), ('unispeech-sat', 'UniSpeechSat'), ('univnet', 'UnivNet'), ('upernet', 'UPerNet'), ('van', 'VAN'), ('video_llava', 'VideoLlava'), ('videomae', 'VideoMAE'), ('vilt', 'ViLT'), ('vipllava', 'VipLlava'), ('vision-encoder-decoder', 'Vision Encoder decoder'), ('vision-text-dual-encoder', 'VisionTextDualEncoder'), ('visual_bert', 'VisualBERT'), ('vit', 'ViT'), ('vit_hybrid', 'ViT Hybrid'), ('vit_mae', 'ViTMAE'), ('vit_msn', 'ViTMSN'), ('vitdet', 'VitDet'), 
('vitmatte', 'ViTMatte'), ('vits', 'VITS'), ('vivit', 'ViViT'), ('wav2vec2', 'Wav2Vec2'), ('wav2vec2-bert', 'Wav2Vec2-BERT'), ('wav2vec2-conformer', 'Wav2Vec2-Conformer'), ('wav2vec2_phoneme', 'Wav2Vec2Phoneme'), ('wavlm', 'WavLM'), ('whisper', 'Whisper'), ('xclip', 'X-CLIP'), ('xglm', 'XGLM'), ('xlm', 'XLM'), ('xlm-prophetnet', 'XLM-ProphetNet'), ('xlm-roberta', 'XLM-RoBERTa'), ('xlm-roberta-xl', 'XLM-RoBERTa-XL'), ('xlm-v', 'XLM-V'), ('xlnet', 'XLNet'), ('xls_r', 'XLS-R'), ('xlsr_wav2vec2', 'XLSR-Wav2Vec2'), ('xmod', 'X-MOD'), ('yolos', 'YOLOS'), ('yoso', 'YOSO'), ('zoedepth', 'ZoeDepth')]) DEPRECATED_MODELS = ['bort', 'deta', 'efficientformer', 'ernie_m', 'gptsan_japanese', 'graphormer', 'jukebox', 'mctct', 'mega', 'mmbt', 'nat', 'nezha', 'open_llama', 'qdqbert', 'realm', 'retribert', 'speech_to_text_2', 'tapex', 'trajectory_transformer', 'transfo_xl', 'tvlt', 'van', 'vit_hybrid', 'xlm_prophetnet'] SPECIAL_MODEL_TYPE_TO_MODULE_NAME = OrderedDict([('openai-gpt', 'openai'), ('data2vec-audio', 'data2vec'), ('data2vec-text', 'data2vec'), ('data2vec-vision', 'data2vec'), ('donut-swin', 'donut'), ('kosmos-2', 'kosmos2'), ('maskformer-swin', 'maskformer'), ('xclip', 'x_clip'), ('clip_vision_model', 'clip'), ('qwen2_audio_encoder', 'qwen2_audio'), ('siglip_vision_model', 'siglip'), ('chinese_clip_vision_model', 'chinese_clip'), ('rt_detr_resnet', 'rt_detr')]) def model_type_to_module_name(key): if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME: key = SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key] if key in DEPRECATED_MODELS: key = f'deprecated.{key}' return key key = key.replace('-', '_') if key in DEPRECATED_MODELS: key = f'deprecated.{key}' return key def config_class_to_model_type(config): for (key, cls) in CONFIG_MAPPING_NAMES.items(): if cls == config: return key for (key, cls) in CONFIG_MAPPING._extra_content.items(): if cls.__name__ == config: return key return None class _LazyConfigMapping(OrderedDict): def __init__(self, mapping): self._mapping = mapping self._extra_content = {} self._modules = {} def __getitem__(self, key): if key in self._extra_content: return self._extra_content[key] if key not in self._mapping: raise KeyError(key) value = self._mapping[key] module_name = model_type_to_module_name(key) if module_name not in self._modules: self._modules[module_name] = importlib.import_module(f'.{module_name}', 'transformers.models') if hasattr(self._modules[module_name], value): return getattr(self._modules[module_name], value) transformers_module = importlib.import_module('transformers') return getattr(transformers_module, value) def keys(self): return list(self._mapping.keys()) + list(self._extra_content.keys()) def values(self): return [self[k] for k in self._mapping.keys()] + list(self._extra_content.values()) def items(self): return [(k, self[k]) for k in self._mapping.keys()] + list(self._extra_content.items()) def __iter__(self): return iter(list(self._mapping.keys()) + list(self._extra_content.keys())) def __contains__(self, item): return item in self._mapping or item in self._extra_content def register(self, key, value, exist_ok=False): if key in self._mapping.keys() and (not exist_ok): raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.") self._extra_content[key] = value CONFIG_MAPPING = _LazyConfigMapping(CONFIG_MAPPING_NAMES) class _LazyLoadAllMappings(OrderedDict): def __init__(self, mapping): self._mapping = mapping self._initialized = False self._data = {} def _initialize(self): if self._initialized: return for (model_type, map_name) in 
self._mapping.items(): module_name = model_type_to_module_name(model_type) module = importlib.import_module(f'.{module_name}', 'transformers.models') mapping = getattr(module, map_name) self._data.update(mapping) self._initialized = True def __getitem__(self, key): self._initialize() return self._data[key] def keys(self): self._initialize() return self._data.keys() def values(self): self._initialize() return self._data.values() def items(self): self._initialize() return self._data.items() def __iter__(self): self._initialize() return iter(self._data) def __contains__(self, item): self._initialize() return item in self._data def _get_class_name(model_class: Union[str, List[str]]): if isinstance(model_class, (list, tuple)): return ' or '.join([f'[`{c}`]' for c in model_class if c is not None]) return f'[`{model_class}`]' def _list_model_options(indent, config_to_class=None, use_model_types=True): if config_to_class is None and (not use_model_types): raise ValueError('Using `use_model_types=False` requires a `config_to_class` dictionary.') if use_model_types: if config_to_class is None: model_type_to_name = {model_type: f'[`{config}`]' for (model_type, config) in CONFIG_MAPPING_NAMES.items()} else: model_type_to_name = {model_type: _get_class_name(model_class) for (model_type, model_class) in config_to_class.items() if model_type in MODEL_NAMES_MAPPING} lines = [f'{indent}- **{model_type}** -- {model_type_to_name[model_type]} ({MODEL_NAMES_MAPPING[model_type]} model)' for model_type in sorted(model_type_to_name.keys())] else: config_to_name = {CONFIG_MAPPING_NAMES[config]: _get_class_name(clas) for (config, clas) in config_to_class.items() if config in CONFIG_MAPPING_NAMES} config_to_model_name = {config: MODEL_NAMES_MAPPING[model_type] for (model_type, config) in CONFIG_MAPPING_NAMES.items()} lines = [f'{indent}- [`{config_name}`] configuration class: {config_to_name[config_name]} ({config_to_model_name[config_name]} model)' for config_name in sorted(config_to_name.keys())] return '\n'.join(lines) def replace_list_option_in_docstrings(config_to_class=None, use_model_types=True): def docstring_decorator(fn): docstrings = fn.__doc__ if docstrings is None: return fn lines = docstrings.split('\n') i = 0 while i < len(lines) and re.search('^(\\s*)List options\\s*$', lines[i]) is None: i += 1 if i < len(lines): indent = re.search('^(\\s*)List options\\s*$', lines[i]).groups()[0] if use_model_types: indent = f'{indent} ' lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types) docstrings = '\n'.join(lines) else: raise ValueError(f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current docstring is:\n{docstrings}") fn.__doc__ = docstrings return fn return docstring_decorator class AutoConfig: def __init__(self): raise EnvironmentError('AutoConfig is designed to be instantiated using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method.') @classmethod def for_model(cls, model_type: str, *args, **kwargs): if model_type in CONFIG_MAPPING: config_class = CONFIG_MAPPING[model_type] return config_class(*args, **kwargs) raise ValueError(f"Unrecognized model identifier: {model_type}. 
Should contain one of {', '.join(CONFIG_MAPPING.keys())}") @classmethod @replace_list_option_in_docstrings() def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): use_auth_token = kwargs.pop('use_auth_token', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if kwargs.get('token', None) is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') kwargs['token'] = use_auth_token kwargs['_from_auto'] = True kwargs['name_or_path'] = pretrained_model_name_or_path trust_remote_code = kwargs.pop('trust_remote_code', None) code_revision = kwargs.pop('code_revision', None) (config_dict, unused_kwargs) = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) has_remote_code = 'auto_map' in config_dict and 'AutoConfig' in config_dict['auto_map'] has_local_code = 'model_type' in config_dict and config_dict['model_type'] in CONFIG_MAPPING trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code) if has_remote_code and trust_remote_code: class_ref = config_dict['auto_map']['AutoConfig'] config_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs) if os.path.isdir(pretrained_model_name_or_path): config_class.register_for_auto_class() return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs) elif 'model_type' in config_dict: try: config_class = CONFIG_MAPPING[config_dict['model_type']] except KeyError: raise ValueError(f"The checkpoint you are trying to load has model type `{config_dict['model_type']}` but Transformers does not recognize this architecture. This could be because of an issue with the checkpoint, or because your version of Transformers is out of date.") return config_class.from_dict(config_dict, **unused_kwargs) else: for pattern in sorted(CONFIG_MAPPING.keys(), key=len, reverse=True): if pattern in str(pretrained_model_name_or_path): return CONFIG_MAPPING[pattern].from_dict(config_dict, **unused_kwargs) raise ValueError(f"Unrecognized model in {pretrained_model_name_or_path}. Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings in its name: {', '.join(CONFIG_MAPPING.keys())}") @staticmethod def register(model_type, config, exist_ok=False): if issubclass(config, PretrainedConfig) and config.model_type != model_type: raise ValueError(f'The config you are passing has a `model_type` attribute that is not consistent with the model type you passed (config has {config.model_type} and you passed {model_type}. 
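The registration path implemented by `AutoConfig.register` here (together with the `register` classmethod on `_BaseAutoModelClass` in auto_factory.py above) is what lets user-defined architectures resolve through the auto classes. A minimal sketch, with `MyCustomConfig` / `MyCustomModel` as made-up names purely for illustration:

```python
from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel


class MyCustomConfig(PretrainedConfig):
    model_type = "my-custom-model"  # must match the key passed to AutoConfig.register


class MyCustomModel(PreTrainedModel):
    config_class = MyCustomConfig  # checked against the config class by AutoModel.register


# Register the config under its model_type, then the model under its config class.
AutoConfig.register("my-custom-model", MyCustomConfig)
AutoModel.register(MyCustomConfig, MyCustomModel)

# The new model_type is now resolvable through the lazy CONFIG_MAPPING (_extra_content).
config = AutoConfig.for_model("my-custom-model")
```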
Fix one of those so they match!') CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok) # File: transformers-main/src/transformers/models/auto/feature_extraction_auto.py """""" import importlib import json import os import warnings from collections import OrderedDict from typing import Dict, Optional, Union from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings logger = logging.get_logger(__name__) FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict([('audio-spectrogram-transformer', 'ASTFeatureExtractor'), ('beit', 'BeitFeatureExtractor'), ('chinese_clip', 'ChineseCLIPFeatureExtractor'), ('clap', 'ClapFeatureExtractor'), ('clip', 'CLIPFeatureExtractor'), ('clipseg', 'ViTFeatureExtractor'), ('clvp', 'ClvpFeatureExtractor'), ('conditional_detr', 'ConditionalDetrFeatureExtractor'), ('convnext', 'ConvNextFeatureExtractor'), ('cvt', 'ConvNextFeatureExtractor'), ('dac', 'DacFeatureExtractor'), ('data2vec-audio', 'Wav2Vec2FeatureExtractor'), ('data2vec-vision', 'BeitFeatureExtractor'), ('deformable_detr', 'DeformableDetrFeatureExtractor'), ('deit', 'DeiTFeatureExtractor'), ('detr', 'DetrFeatureExtractor'), ('dinat', 'ViTFeatureExtractor'), ('donut-swin', 'DonutFeatureExtractor'), ('dpt', 'DPTFeatureExtractor'), ('encodec', 'EncodecFeatureExtractor'), ('flava', 'FlavaFeatureExtractor'), ('glpn', 'GLPNFeatureExtractor'), ('groupvit', 'CLIPFeatureExtractor'), ('hubert', 'Wav2Vec2FeatureExtractor'), ('imagegpt', 'ImageGPTFeatureExtractor'), ('layoutlmv2', 'LayoutLMv2FeatureExtractor'), ('layoutlmv3', 'LayoutLMv3FeatureExtractor'), ('levit', 'LevitFeatureExtractor'), ('maskformer', 'MaskFormerFeatureExtractor'), ('mctct', 'MCTCTFeatureExtractor'), ('mobilenet_v1', 'MobileNetV1FeatureExtractor'), ('mobilenet_v2', 'MobileNetV2FeatureExtractor'), ('mobilevit', 'MobileViTFeatureExtractor'), ('nat', 'ViTFeatureExtractor'), ('owlvit', 'OwlViTFeatureExtractor'), ('perceiver', 'PerceiverFeatureExtractor'), ('poolformer', 'PoolFormerFeatureExtractor'), ('pop2piano', 'Pop2PianoFeatureExtractor'), ('regnet', 'ConvNextFeatureExtractor'), ('resnet', 'ConvNextFeatureExtractor'), ('seamless_m4t', 'SeamlessM4TFeatureExtractor'), ('seamless_m4t_v2', 'SeamlessM4TFeatureExtractor'), ('segformer', 'SegformerFeatureExtractor'), ('sew', 'Wav2Vec2FeatureExtractor'), ('sew-d', 'Wav2Vec2FeatureExtractor'), ('speech_to_text', 'Speech2TextFeatureExtractor'), ('speecht5', 'SpeechT5FeatureExtractor'), ('swiftformer', 'ViTFeatureExtractor'), ('swin', 'ViTFeatureExtractor'), ('swinv2', 'ViTFeatureExtractor'), ('table-transformer', 'DetrFeatureExtractor'), ('timesformer', 'VideoMAEFeatureExtractor'), ('tvlt', 'TvltFeatureExtractor'), ('unispeech', 'Wav2Vec2FeatureExtractor'), ('unispeech-sat', 'Wav2Vec2FeatureExtractor'), ('univnet', 'UnivNetFeatureExtractor'), ('van', 'ConvNextFeatureExtractor'), ('videomae', 'VideoMAEFeatureExtractor'), ('vilt', 'ViltFeatureExtractor'), ('vit', 'ViTFeatureExtractor'), ('vit_mae', 'ViTFeatureExtractor'), ('vit_msn', 'ViTFeatureExtractor'), ('wav2vec2', 'Wav2Vec2FeatureExtractor'), ('wav2vec2-bert', 'Wav2Vec2FeatureExtractor'), ('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'), ('wavlm', 
'Wav2Vec2FeatureExtractor'), ('whisper', 'WhisperFeatureExtractor'), ('xclip', 'CLIPFeatureExtractor'), ('yolos', 'YolosFeatureExtractor')]) FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def feature_extractor_class_from_name(class_name: str): for (module_name, extractors) in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: module_name = model_type_to_module_name(module_name) module = importlib.import_module(f'.{module_name}', 'transformers.models') try: return getattr(module, class_name) except AttributeError: continue for (_, extractor) in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(extractor, '__name__', None) == class_name: return extractor main_module = importlib.import_module('transformers') if hasattr(main_module, class_name): return getattr(main_module, class_name) return None def get_feature_extractor_config(pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]]=None, force_download: bool=False, resume_download: Optional[bool]=None, proxies: Optional[Dict[str, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, local_files_only: bool=False, **kwargs): use_auth_token = kwargs.pop('use_auth_token', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') token = use_auth_token resolved_config_file = get_file_from_repo(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only) if resolved_config_file is None: logger.info('Could not locate the feature extractor configuration file, will try to use the model config instead.') return {} with open(resolved_config_file, encoding='utf-8') as reader: return json.load(reader) class AutoFeatureExtractor: def __init__(self): raise EnvironmentError('AutoFeatureExtractor is designed to be instantiated using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.') @classmethod @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES) def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): use_auth_token = kwargs.pop('use_auth_token', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if kwargs.get('token', None) is not None: raise ValueError('`token` and `use_auth_token` are both specified. 
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError('AutoFeatureExtractor is designed to be instantiated using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        use_auth_token = kwargs.pop('use_auth_token', None)
        if use_auth_token is not None:
            warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
            if kwargs.get('token', None) is not None:
                raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
            kwargs['token'] = use_auth_token
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True
        # First look for the class name in the checkpoint's preprocessor config.
        (config_dict, _) = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if 'AutoFeatureExtractor' in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        # Otherwise fall back to the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and 'AutoFeatureExtractor' in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            # Custom feature extractor class shipped with the checkpoint (requires trust_remote_code).
            feature_extractor_class = get_class_from_dynamic_module(feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(f"Unrecognized feature extractor in {pretrained_model_name_or_path}. 
Should have a `feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following `model_type` keys in its {CONFIG_NAME}: {', '.join((c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys()))}") @staticmethod def register(config_class, feature_extractor_class, exist_ok=False): FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class, exist_ok=exist_ok) # File: transformers-main/src/transformers/models/auto/image_processing_auto.py """""" import importlib import json import os import warnings from collections import OrderedDict from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import BaseImageProcessor, ImageProcessingMixin from ...image_processing_utils_fast import BaseImageProcessorFast from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, is_torchvision_available, is_vision_available, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings logger = logging.get_logger(__name__) if TYPE_CHECKING: IMAGE_PROCESSOR_MAPPING_NAMES: OrderedDict[str, Tuple[Optional[str], Optional[str]]] = OrderedDict() else: IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict([('align', ('EfficientNetImageProcessor',)), ('beit', ('BeitImageProcessor',)), ('bit', ('BitImageProcessor',)), ('blip', ('BlipImageProcessor',)), ('blip-2', ('BlipImageProcessor',)), ('bridgetower', ('BridgeTowerImageProcessor',)), ('chameleon', ('ChameleonImageProcessor',)), ('chinese_clip', ('ChineseCLIPImageProcessor',)), ('clip', ('CLIPImageProcessor',)), ('clipseg', ('ViTImageProcessor', 'ViTImageProcessorFast')), ('conditional_detr', ('ConditionalDetrImageProcessor',)), ('convnext', ('ConvNextImageProcessor',)), ('convnextv2', ('ConvNextImageProcessor',)), ('cvt', ('ConvNextImageProcessor',)), ('data2vec-vision', ('BeitImageProcessor',)), ('deformable_detr', ('DeformableDetrImageProcessor',)), ('deit', ('DeiTImageProcessor',)), ('depth_anything', ('DPTImageProcessor',)), ('deta', ('DetaImageProcessor',)), ('detr', ('DetrImageProcessor',)), ('dinat', ('ViTImageProcessor', 'ViTImageProcessorFast')), ('dinov2', ('BitImageProcessor',)), ('donut-swin', ('DonutImageProcessor',)), ('dpt', ('DPTImageProcessor',)), ('efficientformer', ('EfficientFormerImageProcessor',)), ('efficientnet', ('EfficientNetImageProcessor',)), ('flava', ('FlavaImageProcessor',)), ('focalnet', ('BitImageProcessor',)), ('fuyu', ('FuyuImageProcessor',)), ('git', ('CLIPImageProcessor',)), ('glpn', ('GLPNImageProcessor',)), ('grounding-dino', ('GroundingDinoImageProcessor',)), ('groupvit', ('CLIPImageProcessor',)), ('hiera', ('BitImageProcessor',)), ('idefics', ('IdeficsImageProcessor',)), ('idefics2', ('Idefics2ImageProcessor',)), ('imagegpt', ('ImageGPTImageProcessor',)), ('instructblip', ('BlipImageProcessor',)), ('instructblipvideo', ('InstructBlipVideoImageProcessor',)), ('kosmos-2', ('CLIPImageProcessor',)), ('layoutlmv2', ('LayoutLMv2ImageProcessor',)), ('layoutlmv3', ('LayoutLMv3ImageProcessor',)), ('levit', ('LevitImageProcessor',)), ('llava', ('CLIPImageProcessor',)), ('llava_next', ('LlavaNextImageProcessor',)), ('llava_next_video', ('LlavaNextVideoImageProcessor',)), ('llava_onevision', ('LlavaOnevisionImageProcessor',)), ('mask2former', ('Mask2FormerImageProcessor',)), ('maskformer', 
('MaskFormerImageProcessor',)), ('mgp-str', ('ViTImageProcessor', 'ViTImageProcessorFast')), ('mobilenet_v1', ('MobileNetV1ImageProcessor',)), ('mobilenet_v2', ('MobileNetV2ImageProcessor',)), ('mobilevit', ('MobileViTImageProcessor',)), ('mobilevitv2', ('MobileViTImageProcessor',)), ('nat', ('ViTImageProcessor', 'ViTImageProcessorFast')), ('nougat', ('NougatImageProcessor',)), ('oneformer', ('OneFormerImageProcessor',)), ('owlv2', ('Owlv2ImageProcessor',)), ('owlvit', ('OwlViTImageProcessor',)), ('perceiver', ('PerceiverImageProcessor',)), ('pix2struct', ('Pix2StructImageProcessor',)), ('pixtral', ('PixtralImageProcessor',)), ('poolformer', ('PoolFormerImageProcessor',)), ('pvt', ('PvtImageProcessor',)), ('pvt_v2', ('PvtImageProcessor',)), ('qwen2_vl', ('Qwen2VLImageProcessor',)), ('regnet', ('ConvNextImageProcessor',)), ('resnet', ('ConvNextImageProcessor',)), ('rt_detr', ('RTDetrImageProcessor',)), ('sam', ('SamImageProcessor',)), ('segformer', ('SegformerImageProcessor',)), ('seggpt', ('SegGptImageProcessor',)), ('siglip', ('SiglipImageProcessor',)), ('swiftformer', ('ViTImageProcessor', 'ViTImageProcessorFast')), ('swin', ('ViTImageProcessor', 'ViTImageProcessorFast')), ('swin2sr', ('Swin2SRImageProcessor',)), ('swinv2', ('ViTImageProcessor', 'ViTImageProcessorFast')), ('table-transformer', ('DetrImageProcessor',)), ('timesformer', ('VideoMAEImageProcessor',)), ('tvlt', ('TvltImageProcessor',)), ('tvp', ('TvpImageProcessor',)), ('udop', ('LayoutLMv3ImageProcessor',)), ('upernet', ('SegformerImageProcessor',)), ('van', ('ConvNextImageProcessor',)), ('videomae', ('VideoMAEImageProcessor',)), ('vilt', ('ViltImageProcessor',)), ('vipllava', ('CLIPImageProcessor',)), ('vit', ('ViTImageProcessor', 'ViTImageProcessorFast')), ('vit_hybrid', ('ViTHybridImageProcessor',)), ('vit_mae', ('ViTImageProcessor', 'ViTImageProcessorFast')), ('vit_msn', ('ViTImageProcessor', 'ViTImageProcessorFast')), ('vitmatte', ('VitMatteImageProcessor',)), ('xclip', ('CLIPImageProcessor',)), ('yolos', ('YolosImageProcessor',)), ('zoedepth', ('ZoeDepthImageProcessor',))])
# Normalize each entry to a (slow_class, fast_class) pair; drop classes whose backend (vision / torchvision) is unavailable.
for model_type, image_processors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
    slow_image_processor_class, *fast_image_processor_class = image_processors
    if not is_vision_available():
        slow_image_processor_class = None
    if not fast_image_processor_class or fast_image_processor_class[0] is None or (not is_torchvision_available()):
        fast_image_processor_class = None
    else:
        fast_image_processor_class = fast_image_processor_class[0]
    IMAGE_PROCESSOR_MAPPING_NAMES[model_type] = (slow_image_processor_class, fast_image_processor_class)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name: str):
    if class_name == 'BaseImageProcessorFast':
        return BaseImageProcessorFast
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f'.{module_name}', 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractors in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        for extractor in extractors:
            if getattr(extractor, '__name__', None) == class_name:
                return extractor
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]]=None, force_download: bool=False, resume_download: Optional[bool]=None, proxies: Optional[Dict[str, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, local_files_only: bool=False, **kwargs):
    use_auth_token = kwargs.pop('use_auth_token', None)
    if use_auth_token is not None:
        warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
        if token is not None:
            raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
        token = use_auth_token
    resolved_config_file = get_file_from_repo(pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only)
    if resolved_config_file is None:
        logger.info('Could not locate the image processor configuration file, will try to use the model config instead.')
        return {}
    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)


def _warning_fast_image_processor_available(fast_class):
    logger.warning(f'Fast image processor class {fast_class} is available for this model. Using slow image processor class. To use the fast image processor class set `use_fast=True`.')


class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError('AutoImageProcessor is designed to be instantiated using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        use_auth_token = kwargs.pop('use_auth_token', None)
        if use_auth_token is not None:
            warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)
            if kwargs.get('token', None) is not None:
                raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')
            kwargs['token'] = use_auth_token
        config = kwargs.pop('config', None)
        use_fast = kwargs.pop('use_fast', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True
        (config_dict, _) = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get('image_processor_type', None)
        image_processor_auto_map = None
        if 'AutoImageProcessor' in config_dict.get('auto_map', {}):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']
        # Legacy fallback: older checkpoints only declare a feature extractor; map its name to the image processor equivalent.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type', None)
            if feature_extractor_class is not None:
                image_processor_class = feature_extractor_class.replace('FeatureExtractor', 'ImageProcessor')
            if 'AutoFeatureExtractor' in config_dict.get('auto_map', {}):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor', 'ImageProcessor')
        # Last resort: read the class name from the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            image_processor_class = getattr(config, 'image_processor_type', None)
            if hasattr(config, 'auto_map') and 'AutoImageProcessor' in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']
        if image_processor_class is not None:
            # `use_fast` toggles between the slow and the fast class by adjusting the name suffix.
            if use_fast is not None:
                if use_fast and (not image_processor_class.endswith('Fast')):
                    image_processor_class += 'Fast'
                elif not use_fast and image_processor_class.endswith('Fast'):
                    image_processor_class = image_processor_class[:-4]
            image_processor_class = image_processor_class_from_name(image_processor_class)
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if image_processor_auto_map is not None and (not isinstance(image_processor_auto_map, tuple)):
            image_processor_auto_map = (image_processor_auto_map, None)
        if has_remote_code and trust_remote_code:
            if not use_fast and image_processor_auto_map[1] is not None:
                _warning_fast_image_processor_available(image_processor_auto_map[1])
            if use_fast and image_processor_auto_map[1] is not None:
                class_ref = image_processor_auto_map[1]
            else:
                class_ref = image_processor_auto_map[0]
            image_processor_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_tuple = IMAGE_PROCESSOR_MAPPING[type(config)]
            (image_processor_class_py, image_processor_class_fast) = image_processor_tuple
            if not use_fast and image_processor_class_fast is not None:
                _warning_fast_image_processor_available(image_processor_class_fast)
            if image_processor_class_fast and (use_fast or image_processor_class_py is None):
                return image_processor_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
            elif image_processor_class_py is not None:
                return image_processor_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
            else:
                raise ValueError('This image processor cannot be instantiated. Please make sure you have `Pillow` installed.')
        raise ValueError(f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a `image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following `model_type` keys in its {CONFIG_NAME}: {', '.join((c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys()))}")
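    # --- Illustrative usage sketch (editor's addition, not part of the original module) ---
    # A typical call to the `from_pretrained` method defined above looks like the commented
    # snippet below. The checkpoint name is an assumption chosen only for illustration, and
    # `use_fast=True` additionally requires torchvision; `image` stands for any PIL image.
    #
    #     from transformers import AutoImageProcessor
    #
    #     image_processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224', use_fast=True)
    #     inputs = image_processor(images=image, return_tensors='pt')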
    @staticmethod
    def register(config_class, image_processor_class=None, slow_image_processor_class=None, fast_image_processor_class=None, exist_ok=False):
        if image_processor_class is not None:
            if slow_image_processor_class is not None:
                raise ValueError('Cannot specify both image_processor_class and slow_image_processor_class')
            warnings.warn('The image_processor_class argument is deprecated and will be removed in v4.42. Please use `slow_image_processor_class`, or `fast_image_processor_class` instead', FutureWarning)
            slow_image_processor_class = image_processor_class
        if slow_image_processor_class is None and fast_image_processor_class is None:
            raise ValueError('You need to specify either slow_image_processor_class or fast_image_processor_class')
        if slow_image_processor_class is not None and issubclass(slow_image_processor_class, BaseImageProcessorFast):
            raise ValueError('You passed a fast image processor in as the `slow_image_processor_class`.')
        if fast_image_processor_class is not None and issubclass(fast_image_processor_class, BaseImageProcessor):
            raise ValueError('You passed a slow image processor in as the `fast_image_processor_class`.')
        if slow_image_processor_class is not None and fast_image_processor_class is not None and issubclass(fast_image_processor_class, BaseImageProcessorFast) and (fast_image_processor_class.slow_image_processor_class != slow_image_processor_class):
            raise ValueError(f'The fast processor class you are passing has a `slow_image_processor_class` attribute that is not consistent with the slow processor class you passed (fast processor has {fast_image_processor_class.slow_image_processor_class} and you passed {slow_image_processor_class}. 
Fix one of those so they match!') if config_class in IMAGE_PROCESSOR_MAPPING._extra_content: (existing_slow, existing_fast) = IMAGE_PROCESSOR_MAPPING[config_class] if slow_image_processor_class is None: slow_image_processor_class = existing_slow if fast_image_processor_class is None: fast_image_processor_class = existing_fast IMAGE_PROCESSOR_MAPPING.register(config_class, (slow_image_processor_class, fast_image_processor_class), exist_ok=exist_ok) # File: transformers-main/src/transformers/models/auto/modeling_auto.py """""" import warnings from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoBackboneClass, _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES logger = logging.get_logger(__name__) MODEL_MAPPING_NAMES = OrderedDict([('albert', 'AlbertModel'), ('align', 'AlignModel'), ('altclip', 'AltCLIPModel'), ('audio-spectrogram-transformer', 'ASTModel'), ('autoformer', 'AutoformerModel'), ('bark', 'BarkModel'), ('bart', 'BartModel'), ('beit', 'BeitModel'), ('bert', 'BertModel'), ('bert-generation', 'BertGenerationEncoder'), ('big_bird', 'BigBirdModel'), ('bigbird_pegasus', 'BigBirdPegasusModel'), ('biogpt', 'BioGptModel'), ('bit', 'BitModel'), ('blenderbot', 'BlenderbotModel'), ('blenderbot-small', 'BlenderbotSmallModel'), ('blip', 'BlipModel'), ('blip-2', 'Blip2Model'), ('bloom', 'BloomModel'), ('bridgetower', 'BridgeTowerModel'), ('bros', 'BrosModel'), ('camembert', 'CamembertModel'), ('canine', 'CanineModel'), ('chameleon', 'ChameleonModel'), ('chinese_clip', 'ChineseCLIPModel'), ('chinese_clip_vision_model', 'ChineseCLIPVisionModel'), ('clap', 'ClapModel'), ('clip', 'CLIPModel'), ('clip_vision_model', 'CLIPVisionModel'), ('clipseg', 'CLIPSegModel'), ('clvp', 'ClvpModelForConditionalGeneration'), ('code_llama', 'LlamaModel'), ('codegen', 'CodeGenModel'), ('cohere', 'CohereModel'), ('conditional_detr', 'ConditionalDetrModel'), ('convbert', 'ConvBertModel'), ('convnext', 'ConvNextModel'), ('convnextv2', 'ConvNextV2Model'), ('cpmant', 'CpmAntModel'), ('ctrl', 'CTRLModel'), ('cvt', 'CvtModel'), ('dac', 'DacModel'), ('data2vec-audio', 'Data2VecAudioModel'), ('data2vec-text', 'Data2VecTextModel'), ('data2vec-vision', 'Data2VecVisionModel'), ('dbrx', 'DbrxModel'), ('deberta', 'DebertaModel'), ('deberta-v2', 'DebertaV2Model'), ('decision_transformer', 'DecisionTransformerModel'), ('deformable_detr', 'DeformableDetrModel'), ('deit', 'DeiTModel'), ('deta', 'DetaModel'), ('detr', 'DetrModel'), ('dinat', 'DinatModel'), ('dinov2', 'Dinov2Model'), ('distilbert', 'DistilBertModel'), ('donut-swin', 'DonutSwinModel'), ('dpr', 'DPRQuestionEncoder'), ('dpt', 'DPTModel'), ('efficientformer', 'EfficientFormerModel'), ('efficientnet', 'EfficientNetModel'), ('electra', 'ElectraModel'), ('encodec', 'EncodecModel'), ('ernie', 'ErnieModel'), ('ernie_m', 'ErnieMModel'), ('esm', 'EsmModel'), ('falcon', 'FalconModel'), ('falcon_mamba', 'FalconMambaModel'), ('fastspeech2_conformer', 'FastSpeech2ConformerModel'), ('flaubert', 'FlaubertModel'), ('flava', 'FlavaModel'), ('fnet', 'FNetModel'), ('focalnet', 'FocalNetModel'), ('fsmt', 'FSMTModel'), ('funnel', ('FunnelModel', 'FunnelBaseModel')), ('gemma', 'GemmaModel'), ('gemma2', 'Gemma2Model'), ('git', 'GitModel'), ('glpn', 'GLPNModel'), ('gpt-sw3', 'GPT2Model'), ('gpt2', 'GPT2Model'), ('gpt_bigcode', 'GPTBigCodeModel'), ('gpt_neo', 'GPTNeoModel'), ('gpt_neox', 'GPTNeoXModel'), ('gpt_neox_japanese', 'GPTNeoXJapaneseModel'), ('gptj', 'GPTJModel'), ('gptsan-japanese', 
'GPTSanJapaneseForConditionalGeneration'), ('granite', 'GraniteModel'), ('graphormer', 'GraphormerModel'), ('grounding-dino', 'GroundingDinoModel'), ('groupvit', 'GroupViTModel'), ('hiera', 'HieraModel'), ('hubert', 'HubertModel'), ('ibert', 'IBertModel'), ('idefics', 'IdeficsModel'), ('idefics2', 'Idefics2Model'), ('imagegpt', 'ImageGPTModel'), ('informer', 'InformerModel'), ('jamba', 'JambaModel'), ('jetmoe', 'JetMoeModel'), ('jukebox', 'JukeboxModel'), ('kosmos-2', 'Kosmos2Model'), ('layoutlm', 'LayoutLMModel'), ('layoutlmv2', 'LayoutLMv2Model'), ('layoutlmv3', 'LayoutLMv3Model'), ('led', 'LEDModel'), ('levit', 'LevitModel'), ('lilt', 'LiltModel'), ('llama', 'LlamaModel'), ('longformer', 'LongformerModel'), ('longt5', 'LongT5Model'), ('luke', 'LukeModel'), ('lxmert', 'LxmertModel'), ('m2m_100', 'M2M100Model'), ('mamba', 'MambaModel'), ('mamba2', 'Mamba2Model'), ('marian', 'MarianModel'), ('markuplm', 'MarkupLMModel'), ('mask2former', 'Mask2FormerModel'), ('maskformer', 'MaskFormerModel'), ('maskformer-swin', 'MaskFormerSwinModel'), ('mbart', 'MBartModel'), ('mctct', 'MCTCTModel'), ('mega', 'MegaModel'), ('megatron-bert', 'MegatronBertModel'), ('mgp-str', 'MgpstrForSceneTextRecognition'), ('mistral', 'MistralModel'), ('mixtral', 'MixtralModel'), ('mobilebert', 'MobileBertModel'), ('mobilenet_v1', 'MobileNetV1Model'), ('mobilenet_v2', 'MobileNetV2Model'), ('mobilevit', 'MobileViTModel'), ('mobilevitv2', 'MobileViTV2Model'), ('mpnet', 'MPNetModel'), ('mpt', 'MptModel'), ('mra', 'MraModel'), ('mt5', 'MT5Model'), ('musicgen', 'MusicgenModel'), ('musicgen_melody', 'MusicgenMelodyModel'), ('mvp', 'MvpModel'), ('nat', 'NatModel'), ('nemotron', 'NemotronModel'), ('nezha', 'NezhaModel'), ('nllb-moe', 'NllbMoeModel'), ('nystromformer', 'NystromformerModel'), ('olmo', 'OlmoModel'), ('olmoe', 'OlmoeModel'), ('oneformer', 'OneFormerModel'), ('open-llama', 'OpenLlamaModel'), ('openai-gpt', 'OpenAIGPTModel'), ('opt', 'OPTModel'), ('owlv2', 'Owlv2Model'), ('owlvit', 'OwlViTModel'), ('patchtsmixer', 'PatchTSMixerModel'), ('patchtst', 'PatchTSTModel'), ('pegasus', 'PegasusModel'), ('pegasus_x', 'PegasusXModel'), ('perceiver', 'PerceiverModel'), ('persimmon', 'PersimmonModel'), ('phi', 'PhiModel'), ('phi3', 'Phi3Model'), ('pixtral', 'PixtralModel'), ('plbart', 'PLBartModel'), ('poolformer', 'PoolFormerModel'), ('prophetnet', 'ProphetNetModel'), ('pvt', 'PvtModel'), ('pvt_v2', 'PvtV2Model'), ('qdqbert', 'QDQBertModel'), ('qwen2', 'Qwen2Model'), ('qwen2_audio_encoder', 'Qwen2AudioEncoder'), ('qwen2_moe', 'Qwen2MoeModel'), ('qwen2_vl', 'Qwen2VLModel'), ('recurrent_gemma', 'RecurrentGemmaModel'), ('reformer', 'ReformerModel'), ('regnet', 'RegNetModel'), ('rembert', 'RemBertModel'), ('resnet', 'ResNetModel'), ('retribert', 'RetriBertModel'), ('roberta', 'RobertaModel'), ('roberta-prelayernorm', 'RobertaPreLayerNormModel'), ('roc_bert', 'RoCBertModel'), ('roformer', 'RoFormerModel'), ('rt_detr', 'RTDetrModel'), ('rwkv', 'RwkvModel'), ('sam', 'SamModel'), ('seamless_m4t', 'SeamlessM4TModel'), ('seamless_m4t_v2', 'SeamlessM4Tv2Model'), ('segformer', 'SegformerModel'), ('seggpt', 'SegGptModel'), ('sew', 'SEWModel'), ('sew-d', 'SEWDModel'), ('siglip', 'SiglipModel'), ('siglip_vision_model', 'SiglipVisionModel'), ('speech_to_text', 'Speech2TextModel'), ('speecht5', 'SpeechT5Model'), ('splinter', 'SplinterModel'), ('squeezebert', 'SqueezeBertModel'), ('stablelm', 'StableLmModel'), ('starcoder2', 'Starcoder2Model'), ('swiftformer', 'SwiftFormerModel'), ('swin', 'SwinModel'), ('swin2sr', 'Swin2SRModel'), ('swinv2', 
'Swinv2Model'), ('switch_transformers', 'SwitchTransformersModel'), ('t5', 'T5Model'), ('table-transformer', 'TableTransformerModel'), ('tapas', 'TapasModel'), ('time_series_transformer', 'TimeSeriesTransformerModel'), ('timesformer', 'TimesformerModel'), ('timm_backbone', 'TimmBackbone'), ('trajectory_transformer', 'TrajectoryTransformerModel'), ('transfo-xl', 'TransfoXLModel'), ('tvlt', 'TvltModel'), ('tvp', 'TvpModel'), ('udop', 'UdopModel'), ('umt5', 'UMT5Model'), ('unispeech', 'UniSpeechModel'), ('unispeech-sat', 'UniSpeechSatModel'), ('univnet', 'UnivNetModel'), ('van', 'VanModel'), ('videomae', 'VideoMAEModel'), ('vilt', 'ViltModel'), ('vision-text-dual-encoder', 'VisionTextDualEncoderModel'), ('visual_bert', 'VisualBertModel'), ('vit', 'ViTModel'), ('vit_hybrid', 'ViTHybridModel'), ('vit_mae', 'ViTMAEModel'), ('vit_msn', 'ViTMSNModel'), ('vitdet', 'VitDetModel'), ('vits', 'VitsModel'), ('vivit', 'VivitModel'), ('wav2vec2', 'Wav2Vec2Model'), ('wav2vec2-bert', 'Wav2Vec2BertModel'), ('wav2vec2-conformer', 'Wav2Vec2ConformerModel'), ('wavlm', 'WavLMModel'), ('whisper', 'WhisperModel'), ('xclip', 'XCLIPModel'), ('xglm', 'XGLMModel'), ('xlm', 'XLMModel'), ('xlm-prophetnet', 'XLMProphetNetModel'), ('xlm-roberta', 'XLMRobertaModel'), ('xlm-roberta-xl', 'XLMRobertaXLModel'), ('xlnet', 'XLNetModel'), ('xmod', 'XmodModel'), ('yolos', 'YolosModel'), ('yoso', 'YosoModel')]) MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict([('albert', 'AlbertForPreTraining'), ('bart', 'BartForConditionalGeneration'), ('bert', 'BertForPreTraining'), ('big_bird', 'BigBirdForPreTraining'), ('bloom', 'BloomForCausalLM'), ('camembert', 'CamembertForMaskedLM'), ('ctrl', 'CTRLLMHeadModel'), ('data2vec-text', 'Data2VecTextForMaskedLM'), ('deberta', 'DebertaForMaskedLM'), ('deberta-v2', 'DebertaV2ForMaskedLM'), ('distilbert', 'DistilBertForMaskedLM'), ('electra', 'ElectraForPreTraining'), ('ernie', 'ErnieForPreTraining'), ('falcon_mamba', 'FalconMambaForCausalLM'), ('flaubert', 'FlaubertWithLMHeadModel'), ('flava', 'FlavaForPreTraining'), ('fnet', 'FNetForPreTraining'), ('fsmt', 'FSMTForConditionalGeneration'), ('funnel', 'FunnelForPreTraining'), ('gpt-sw3', 'GPT2LMHeadModel'), ('gpt2', 'GPT2LMHeadModel'), ('gpt_bigcode', 'GPTBigCodeForCausalLM'), ('gptsan-japanese', 'GPTSanJapaneseForConditionalGeneration'), ('hiera', 'HieraForPreTraining'), ('ibert', 'IBertForMaskedLM'), ('idefics', 'IdeficsForVisionText2Text'), ('idefics2', 'Idefics2ForConditionalGeneration'), ('layoutlm', 'LayoutLMForMaskedLM'), ('llava', 'LlavaForConditionalGeneration'), ('llava_next', 'LlavaNextForConditionalGeneration'), ('llava_next_video', 'LlavaNextVideoForConditionalGeneration'), ('llava_onevision', 'LlavaOnevisionForConditionalGeneration'), ('longformer', 'LongformerForMaskedLM'), ('luke', 'LukeForMaskedLM'), ('lxmert', 'LxmertForPreTraining'), ('mamba', 'MambaForCausalLM'), ('mamba2', 'Mamba2ForCausalLM'), ('mega', 'MegaForMaskedLM'), ('megatron-bert', 'MegatronBertForPreTraining'), ('mobilebert', 'MobileBertForPreTraining'), ('mpnet', 'MPNetForMaskedLM'), ('mpt', 'MptForCausalLM'), ('mra', 'MraForMaskedLM'), ('mvp', 'MvpForConditionalGeneration'), ('nezha', 'NezhaForPreTraining'), ('nllb-moe', 'NllbMoeForConditionalGeneration'), ('openai-gpt', 'OpenAIGPTLMHeadModel'), ('paligemma', 'PaliGemmaForConditionalGeneration'), ('qwen2_audio', 'Qwen2AudioForConditionalGeneration'), ('retribert', 'RetriBertModel'), ('roberta', 'RobertaForMaskedLM'), ('roberta-prelayernorm', 'RobertaPreLayerNormForMaskedLM'), ('roc_bert', 'RoCBertForPreTraining'), 
('rwkv', 'RwkvForCausalLM'), ('splinter', 'SplinterForPreTraining'), ('squeezebert', 'SqueezeBertForMaskedLM'), ('switch_transformers', 'SwitchTransformersForConditionalGeneration'), ('t5', 'T5ForConditionalGeneration'), ('tapas', 'TapasForMaskedLM'), ('transfo-xl', 'TransfoXLLMHeadModel'), ('tvlt', 'TvltForPreTraining'), ('unispeech', 'UniSpeechForPreTraining'), ('unispeech-sat', 'UniSpeechSatForPreTraining'), ('video_llava', 'VideoLlavaForConditionalGeneration'), ('videomae', 'VideoMAEForPreTraining'), ('vipllava', 'VipLlavaForConditionalGeneration'), ('visual_bert', 'VisualBertForPreTraining'), ('vit_mae', 'ViTMAEForPreTraining'), ('wav2vec2', 'Wav2Vec2ForPreTraining'), ('wav2vec2-conformer', 'Wav2Vec2ConformerForPreTraining'), ('xlm', 'XLMWithLMHeadModel'), ('xlm-roberta', 'XLMRobertaForMaskedLM'), ('xlm-roberta-xl', 'XLMRobertaXLForMaskedLM'), ('xlnet', 'XLNetLMHeadModel'), ('xmod', 'XmodForMaskedLM')]) MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict([('albert', 'AlbertForMaskedLM'), ('bart', 'BartForConditionalGeneration'), ('bert', 'BertForMaskedLM'), ('big_bird', 'BigBirdForMaskedLM'), ('bigbird_pegasus', 'BigBirdPegasusForConditionalGeneration'), ('blenderbot-small', 'BlenderbotSmallForConditionalGeneration'), ('bloom', 'BloomForCausalLM'), ('camembert', 'CamembertForMaskedLM'), ('codegen', 'CodeGenForCausalLM'), ('convbert', 'ConvBertForMaskedLM'), ('cpmant', 'CpmAntForCausalLM'), ('ctrl', 'CTRLLMHeadModel'), ('data2vec-text', 'Data2VecTextForMaskedLM'), ('deberta', 'DebertaForMaskedLM'), ('deberta-v2', 'DebertaV2ForMaskedLM'), ('distilbert', 'DistilBertForMaskedLM'), ('electra', 'ElectraForMaskedLM'), ('encoder-decoder', 'EncoderDecoderModel'), ('ernie', 'ErnieForMaskedLM'), ('esm', 'EsmForMaskedLM'), ('falcon_mamba', 'FalconMambaForCausalLM'), ('flaubert', 'FlaubertWithLMHeadModel'), ('fnet', 'FNetForMaskedLM'), ('fsmt', 'FSMTForConditionalGeneration'), ('funnel', 'FunnelForMaskedLM'), ('git', 'GitForCausalLM'), ('gpt-sw3', 'GPT2LMHeadModel'), ('gpt2', 'GPT2LMHeadModel'), ('gpt_bigcode', 'GPTBigCodeForCausalLM'), ('gpt_neo', 'GPTNeoForCausalLM'), ('gpt_neox', 'GPTNeoXForCausalLM'), ('gpt_neox_japanese', 'GPTNeoXJapaneseForCausalLM'), ('gptj', 'GPTJForCausalLM'), ('gptsan-japanese', 'GPTSanJapaneseForConditionalGeneration'), ('ibert', 'IBertForMaskedLM'), ('layoutlm', 'LayoutLMForMaskedLM'), ('led', 'LEDForConditionalGeneration'), ('longformer', 'LongformerForMaskedLM'), ('longt5', 'LongT5ForConditionalGeneration'), ('luke', 'LukeForMaskedLM'), ('m2m_100', 'M2M100ForConditionalGeneration'), ('mamba', 'MambaForCausalLM'), ('mamba2', 'Mamba2ForCausalLM'), ('marian', 'MarianMTModel'), ('mega', 'MegaForMaskedLM'), ('megatron-bert', 'MegatronBertForCausalLM'), ('mobilebert', 'MobileBertForMaskedLM'), ('mpnet', 'MPNetForMaskedLM'), ('mpt', 'MptForCausalLM'), ('mra', 'MraForMaskedLM'), ('mvp', 'MvpForConditionalGeneration'), ('nezha', 'NezhaForMaskedLM'), ('nllb-moe', 'NllbMoeForConditionalGeneration'), ('nystromformer', 'NystromformerForMaskedLM'), ('openai-gpt', 'OpenAIGPTLMHeadModel'), ('pegasus_x', 'PegasusXForConditionalGeneration'), ('plbart', 'PLBartForConditionalGeneration'), ('pop2piano', 'Pop2PianoForConditionalGeneration'), ('qdqbert', 'QDQBertForMaskedLM'), ('reformer', 'ReformerModelWithLMHead'), ('rembert', 'RemBertForMaskedLM'), ('roberta', 'RobertaForMaskedLM'), ('roberta-prelayernorm', 'RobertaPreLayerNormForMaskedLM'), ('roc_bert', 'RoCBertForMaskedLM'), ('roformer', 'RoFormerForMaskedLM'), ('rwkv', 'RwkvForCausalLM'), ('speech_to_text', 
'Speech2TextForConditionalGeneration'), ('squeezebert', 'SqueezeBertForMaskedLM'), ('switch_transformers', 'SwitchTransformersForConditionalGeneration'), ('t5', 'T5ForConditionalGeneration'), ('tapas', 'TapasForMaskedLM'), ('transfo-xl', 'TransfoXLLMHeadModel'), ('wav2vec2', 'Wav2Vec2ForMaskedLM'), ('whisper', 'WhisperForConditionalGeneration'), ('xlm', 'XLMWithLMHeadModel'), ('xlm-roberta', 'XLMRobertaForMaskedLM'), ('xlm-roberta-xl', 'XLMRobertaXLForMaskedLM'), ('xlnet', 'XLNetLMHeadModel'), ('xmod', 'XmodForMaskedLM'), ('yoso', 'YosoForMaskedLM')]) MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict([('bart', 'BartForCausalLM'), ('bert', 'BertLMHeadModel'), ('bert-generation', 'BertGenerationDecoder'), ('big_bird', 'BigBirdForCausalLM'), ('bigbird_pegasus', 'BigBirdPegasusForCausalLM'), ('biogpt', 'BioGptForCausalLM'), ('blenderbot', 'BlenderbotForCausalLM'), ('blenderbot-small', 'BlenderbotSmallForCausalLM'), ('bloom', 'BloomForCausalLM'), ('camembert', 'CamembertForCausalLM'), ('code_llama', 'LlamaForCausalLM'), ('codegen', 'CodeGenForCausalLM'), ('cohere', 'CohereForCausalLM'), ('cpmant', 'CpmAntForCausalLM'), ('ctrl', 'CTRLLMHeadModel'), ('data2vec-text', 'Data2VecTextForCausalLM'), ('dbrx', 'DbrxForCausalLM'), ('electra', 'ElectraForCausalLM'), ('ernie', 'ErnieForCausalLM'), ('falcon', 'FalconForCausalLM'), ('falcon_mamba', 'FalconMambaForCausalLM'), ('fuyu', 'FuyuForCausalLM'), ('gemma', 'GemmaForCausalLM'), ('gemma2', 'Gemma2ForCausalLM'), ('git', 'GitForCausalLM'), ('gpt-sw3', 'GPT2LMHeadModel'), ('gpt2', 'GPT2LMHeadModel'), ('gpt_bigcode', 'GPTBigCodeForCausalLM'), ('gpt_neo', 'GPTNeoForCausalLM'), ('gpt_neox', 'GPTNeoXForCausalLM'), ('gpt_neox_japanese', 'GPTNeoXJapaneseForCausalLM'), ('gptj', 'GPTJForCausalLM'), ('granite', 'GraniteForCausalLM'), ('jamba', 'JambaForCausalLM'), ('jetmoe', 'JetMoeForCausalLM'), ('llama', 'LlamaForCausalLM'), ('mamba', 'MambaForCausalLM'), ('mamba2', 'Mamba2ForCausalLM'), ('marian', 'MarianForCausalLM'), ('mbart', 'MBartForCausalLM'), ('mega', 'MegaForCausalLM'), ('megatron-bert', 'MegatronBertForCausalLM'), ('mistral', 'MistralForCausalLM'), ('mixtral', 'MixtralForCausalLM'), ('mpt', 'MptForCausalLM'), ('musicgen', 'MusicgenForCausalLM'), ('musicgen_melody', 'MusicgenMelodyForCausalLM'), ('mvp', 'MvpForCausalLM'), ('nemotron', 'NemotronForCausalLM'), ('olmo', 'OlmoForCausalLM'), ('olmoe', 'OlmoeForCausalLM'), ('open-llama', 'OpenLlamaForCausalLM'), ('openai-gpt', 'OpenAIGPTLMHeadModel'), ('opt', 'OPTForCausalLM'), ('pegasus', 'PegasusForCausalLM'), ('persimmon', 'PersimmonForCausalLM'), ('phi', 'PhiForCausalLM'), ('phi3', 'Phi3ForCausalLM'), ('plbart', 'PLBartForCausalLM'), ('prophetnet', 'ProphetNetForCausalLM'), ('qdqbert', 'QDQBertLMHeadModel'), ('qwen2', 'Qwen2ForCausalLM'), ('qwen2_moe', 'Qwen2MoeForCausalLM'), ('recurrent_gemma', 'RecurrentGemmaForCausalLM'), ('reformer', 'ReformerModelWithLMHead'), ('rembert', 'RemBertForCausalLM'), ('roberta', 'RobertaForCausalLM'), ('roberta-prelayernorm', 'RobertaPreLayerNormForCausalLM'), ('roc_bert', 'RoCBertForCausalLM'), ('roformer', 'RoFormerForCausalLM'), ('rwkv', 'RwkvForCausalLM'), ('speech_to_text_2', 'Speech2Text2ForCausalLM'), ('stablelm', 'StableLmForCausalLM'), ('starcoder2', 'Starcoder2ForCausalLM'), ('transfo-xl', 'TransfoXLLMHeadModel'), ('trocr', 'TrOCRForCausalLM'), ('whisper', 'WhisperForCausalLM'), ('xglm', 'XGLMForCausalLM'), ('xlm', 'XLMWithLMHeadModel'), ('xlm-prophetnet', 'XLMProphetNetForCausalLM'), ('xlm-roberta', 'XLMRobertaForCausalLM'), ('xlm-roberta-xl', 
'XLMRobertaXLForCausalLM'), ('xlnet', 'XLNetLMHeadModel'), ('xmod', 'XmodForCausalLM')]) MODEL_FOR_IMAGE_MAPPING_NAMES = OrderedDict([('beit', 'BeitModel'), ('bit', 'BitModel'), ('conditional_detr', 'ConditionalDetrModel'), ('convnext', 'ConvNextModel'), ('convnextv2', 'ConvNextV2Model'), ('data2vec-vision', 'Data2VecVisionModel'), ('deformable_detr', 'DeformableDetrModel'), ('deit', 'DeiTModel'), ('deta', 'DetaModel'), ('detr', 'DetrModel'), ('dinat', 'DinatModel'), ('dinov2', 'Dinov2Model'), ('dpt', 'DPTModel'), ('efficientformer', 'EfficientFormerModel'), ('efficientnet', 'EfficientNetModel'), ('focalnet', 'FocalNetModel'), ('glpn', 'GLPNModel'), ('hiera', 'HieraModel'), ('imagegpt', 'ImageGPTModel'), ('levit', 'LevitModel'), ('mobilenet_v1', 'MobileNetV1Model'), ('mobilenet_v2', 'MobileNetV2Model'), ('mobilevit', 'MobileViTModel'), ('mobilevitv2', 'MobileViTV2Model'), ('nat', 'NatModel'), ('poolformer', 'PoolFormerModel'), ('pvt', 'PvtModel'), ('regnet', 'RegNetModel'), ('resnet', 'ResNetModel'), ('segformer', 'SegformerModel'), ('siglip_vision_model', 'SiglipVisionModel'), ('swiftformer', 'SwiftFormerModel'), ('swin', 'SwinModel'), ('swin2sr', 'Swin2SRModel'), ('swinv2', 'Swinv2Model'), ('table-transformer', 'TableTransformerModel'), ('timesformer', 'TimesformerModel'), ('timm_backbone', 'TimmBackbone'), ('van', 'VanModel'), ('videomae', 'VideoMAEModel'), ('vit', 'ViTModel'), ('vit_hybrid', 'ViTHybridModel'), ('vit_mae', 'ViTMAEModel'), ('vit_msn', 'ViTMSNModel'), ('vitdet', 'VitDetModel'), ('vivit', 'VivitModel'), ('yolos', 'YolosModel')]) MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict([('deit', 'DeiTForMaskedImageModeling'), ('focalnet', 'FocalNetForMaskedImageModeling'), ('swin', 'SwinForMaskedImageModeling'), ('swinv2', 'Swinv2ForMaskedImageModeling'), ('vit', 'ViTForMaskedImageModeling')]) MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES = OrderedDict([('imagegpt', 'ImageGPTForCausalImageModeling')]) MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('beit', 'BeitForImageClassification'), ('bit', 'BitForImageClassification'), ('clip', 'CLIPForImageClassification'), ('convnext', 'ConvNextForImageClassification'), ('convnextv2', 'ConvNextV2ForImageClassification'), ('cvt', 'CvtForImageClassification'), ('data2vec-vision', 'Data2VecVisionForImageClassification'), ('deit', ('DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher')), ('dinat', 'DinatForImageClassification'), ('dinov2', 'Dinov2ForImageClassification'), ('efficientformer', ('EfficientFormerForImageClassification', 'EfficientFormerForImageClassificationWithTeacher')), ('efficientnet', 'EfficientNetForImageClassification'), ('focalnet', 'FocalNetForImageClassification'), ('hiera', 'HieraForImageClassification'), ('imagegpt', 'ImageGPTForImageClassification'), ('levit', ('LevitForImageClassification', 'LevitForImageClassificationWithTeacher')), ('mobilenet_v1', 'MobileNetV1ForImageClassification'), ('mobilenet_v2', 'MobileNetV2ForImageClassification'), ('mobilevit', 'MobileViTForImageClassification'), ('mobilevitv2', 'MobileViTV2ForImageClassification'), ('nat', 'NatForImageClassification'), ('perceiver', ('PerceiverForImageClassificationLearned', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationConvProcessing')), ('poolformer', 'PoolFormerForImageClassification'), ('pvt', 'PvtForImageClassification'), ('pvt_v2', 'PvtV2ForImageClassification'), ('regnet', 'RegNetForImageClassification'), ('resnet', 'ResNetForImageClassification'), ('segformer', 
'SegformerForImageClassification'), ('siglip', 'SiglipForImageClassification'), ('swiftformer', 'SwiftFormerForImageClassification'), ('swin', 'SwinForImageClassification'), ('swinv2', 'Swinv2ForImageClassification'), ('van', 'VanForImageClassification'), ('vit', 'ViTForImageClassification'), ('vit_hybrid', 'ViTHybridForImageClassification'), ('vit_msn', 'ViTMSNForImageClassification')]) MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = OrderedDict([('detr', 'DetrForSegmentation')]) MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict([('beit', 'BeitForSemanticSegmentation'), ('data2vec-vision', 'Data2VecVisionForSemanticSegmentation'), ('dpt', 'DPTForSemanticSegmentation'), ('mobilenet_v2', 'MobileNetV2ForSemanticSegmentation'), ('mobilevit', 'MobileViTForSemanticSegmentation'), ('mobilevitv2', 'MobileViTV2ForSemanticSegmentation'), ('segformer', 'SegformerForSemanticSegmentation'), ('upernet', 'UperNetForSemanticSegmentation')]) MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES = OrderedDict([('maskformer', 'MaskFormerForInstanceSegmentation')]) MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES = OrderedDict([('detr', 'DetrForSegmentation'), ('mask2former', 'Mask2FormerForUniversalSegmentation'), ('maskformer', 'MaskFormerForInstanceSegmentation'), ('oneformer', 'OneFormerForUniversalSegmentation')]) MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('timesformer', 'TimesformerForVideoClassification'), ('videomae', 'VideoMAEForVideoClassification'), ('vivit', 'VivitForVideoClassification')]) MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict([('blip', 'BlipForConditionalGeneration'), ('blip-2', 'Blip2ForConditionalGeneration'), ('chameleon', 'ChameleonForConditionalGeneration'), ('git', 'GitForCausalLM'), ('idefics2', 'Idefics2ForConditionalGeneration'), ('instructblip', 'InstructBlipForConditionalGeneration'), ('instructblipvideo', 'InstructBlipVideoForConditionalGeneration'), ('kosmos-2', 'Kosmos2ForConditionalGeneration'), ('llava', 'LlavaForConditionalGeneration'), ('llava_next', 'LlavaNextForConditionalGeneration'), ('llava_next_video', 'LlavaNextVideoForConditionalGeneration'), ('llava_onevision', 'LlavaOnevisionForConditionalGeneration'), ('paligemma', 'PaliGemmaForConditionalGeneration'), ('pix2struct', 'Pix2StructForConditionalGeneration'), ('qwen2_vl', 'Qwen2VLForConditionalGeneration'), ('video_llava', 'VideoLlavaForConditionalGeneration'), ('vipllava', 'VipLlavaForConditionalGeneration'), ('vision-encoder-decoder', 'VisionEncoderDecoderModel')]) MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict([('albert', 'AlbertForMaskedLM'), ('bart', 'BartForConditionalGeneration'), ('bert', 'BertForMaskedLM'), ('big_bird', 'BigBirdForMaskedLM'), ('camembert', 'CamembertForMaskedLM'), ('convbert', 'ConvBertForMaskedLM'), ('data2vec-text', 'Data2VecTextForMaskedLM'), ('deberta', 'DebertaForMaskedLM'), ('deberta-v2', 'DebertaV2ForMaskedLM'), ('distilbert', 'DistilBertForMaskedLM'), ('electra', 'ElectraForMaskedLM'), ('ernie', 'ErnieForMaskedLM'), ('esm', 'EsmForMaskedLM'), ('flaubert', 'FlaubertWithLMHeadModel'), ('fnet', 'FNetForMaskedLM'), ('funnel', 'FunnelForMaskedLM'), ('ibert', 'IBertForMaskedLM'), ('layoutlm', 'LayoutLMForMaskedLM'), ('longformer', 'LongformerForMaskedLM'), ('luke', 'LukeForMaskedLM'), ('mbart', 'MBartForConditionalGeneration'), ('mega', 'MegaForMaskedLM'), ('megatron-bert', 'MegatronBertForMaskedLM'), ('mobilebert', 'MobileBertForMaskedLM'), ('mpnet', 'MPNetForMaskedLM'), ('mra', 'MraForMaskedLM'), ('mvp', 'MvpForConditionalGeneration'), ('nezha', 
'NezhaForMaskedLM'), ('nystromformer', 'NystromformerForMaskedLM'), ('perceiver', 'PerceiverForMaskedLM'), ('qdqbert', 'QDQBertForMaskedLM'), ('reformer', 'ReformerForMaskedLM'), ('rembert', 'RemBertForMaskedLM'), ('roberta', 'RobertaForMaskedLM'), ('roberta-prelayernorm', 'RobertaPreLayerNormForMaskedLM'), ('roc_bert', 'RoCBertForMaskedLM'), ('roformer', 'RoFormerForMaskedLM'), ('squeezebert', 'SqueezeBertForMaskedLM'), ('tapas', 'TapasForMaskedLM'), ('wav2vec2', 'Wav2Vec2ForMaskedLM'), ('xlm', 'XLMWithLMHeadModel'), ('xlm-roberta', 'XLMRobertaForMaskedLM'), ('xlm-roberta-xl', 'XLMRobertaXLForMaskedLM'), ('xmod', 'XmodForMaskedLM'), ('yoso', 'YosoForMaskedLM')]) MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict([('conditional_detr', 'ConditionalDetrForObjectDetection'), ('deformable_detr', 'DeformableDetrForObjectDetection'), ('deta', 'DetaForObjectDetection'), ('detr', 'DetrForObjectDetection'), ('rt_detr', 'RTDetrForObjectDetection'), ('table-transformer', 'TableTransformerForObjectDetection'), ('yolos', 'YolosForObjectDetection')]) MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict([('grounding-dino', 'GroundingDinoForObjectDetection'), ('owlv2', 'Owlv2ForObjectDetection'), ('owlvit', 'OwlViTForObjectDetection')]) MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = OrderedDict([('depth_anything', 'DepthAnythingForDepthEstimation'), ('dpt', 'DPTForDepthEstimation'), ('glpn', 'GLPNForDepthEstimation'), ('zoedepth', 'ZoeDepthForDepthEstimation')]) MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict([('bart', 'BartForConditionalGeneration'), ('bigbird_pegasus', 'BigBirdPegasusForConditionalGeneration'), ('blenderbot', 'BlenderbotForConditionalGeneration'), ('blenderbot-small', 'BlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'EncoderDecoderModel'), ('fsmt', 'FSMTForConditionalGeneration'), ('gptsan-japanese', 'GPTSanJapaneseForConditionalGeneration'), ('led', 'LEDForConditionalGeneration'), ('longt5', 'LongT5ForConditionalGeneration'), ('m2m_100', 'M2M100ForConditionalGeneration'), ('marian', 'MarianMTModel'), ('mbart', 'MBartForConditionalGeneration'), ('mt5', 'MT5ForConditionalGeneration'), ('mvp', 'MvpForConditionalGeneration'), ('nllb-moe', 'NllbMoeForConditionalGeneration'), ('pegasus', 'PegasusForConditionalGeneration'), ('pegasus_x', 'PegasusXForConditionalGeneration'), ('plbart', 'PLBartForConditionalGeneration'), ('prophetnet', 'ProphetNetForConditionalGeneration'), ('qwen2_audio', 'Qwen2AudioForConditionalGeneration'), ('seamless_m4t', 'SeamlessM4TForTextToText'), ('seamless_m4t_v2', 'SeamlessM4Tv2ForTextToText'), ('switch_transformers', 'SwitchTransformersForConditionalGeneration'), ('t5', 'T5ForConditionalGeneration'), ('umt5', 'UMT5ForConditionalGeneration'), ('xlm-prophetnet', 'XLMProphetNetForConditionalGeneration')]) MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict([('pop2piano', 'Pop2PianoForConditionalGeneration'), ('seamless_m4t', 'SeamlessM4TForSpeechToText'), ('seamless_m4t_v2', 'SeamlessM4Tv2ForSpeechToText'), ('speech-encoder-decoder', 'SpeechEncoderDecoderModel'), ('speech_to_text', 'Speech2TextForConditionalGeneration'), ('speecht5', 'SpeechT5ForSpeechToText'), ('whisper', 'WhisperForConditionalGeneration')]) MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('albert', 'AlbertForSequenceClassification'), ('bart', 'BartForSequenceClassification'), ('bert', 'BertForSequenceClassification'), ('big_bird', 'BigBirdForSequenceClassification'), ('bigbird_pegasus', 'BigBirdPegasusForSequenceClassification'), ('biogpt', 
'BioGptForSequenceClassification'), ('bloom', 'BloomForSequenceClassification'), ('camembert', 'CamembertForSequenceClassification'), ('canine', 'CanineForSequenceClassification'), ('code_llama', 'LlamaForSequenceClassification'), ('convbert', 'ConvBertForSequenceClassification'), ('ctrl', 'CTRLForSequenceClassification'), ('data2vec-text', 'Data2VecTextForSequenceClassification'), ('deberta', 'DebertaForSequenceClassification'), ('deberta-v2', 'DebertaV2ForSequenceClassification'), ('distilbert', 'DistilBertForSequenceClassification'), ('electra', 'ElectraForSequenceClassification'), ('ernie', 'ErnieForSequenceClassification'), ('ernie_m', 'ErnieMForSequenceClassification'), ('esm', 'EsmForSequenceClassification'), ('falcon', 'FalconForSequenceClassification'), ('flaubert', 'FlaubertForSequenceClassification'), ('fnet', 'FNetForSequenceClassification'), ('funnel', 'FunnelForSequenceClassification'), ('gemma', 'GemmaForSequenceClassification'), ('gemma2', 'Gemma2ForSequenceClassification'), ('gpt-sw3', 'GPT2ForSequenceClassification'), ('gpt2', 'GPT2ForSequenceClassification'), ('gpt_bigcode', 'GPTBigCodeForSequenceClassification'), ('gpt_neo', 'GPTNeoForSequenceClassification'), ('gpt_neox', 'GPTNeoXForSequenceClassification'), ('gptj', 'GPTJForSequenceClassification'), ('ibert', 'IBertForSequenceClassification'), ('jamba', 'JambaForSequenceClassification'), ('jetmoe', 'JetMoeForSequenceClassification'), ('layoutlm', 'LayoutLMForSequenceClassification'), ('layoutlmv2', 'LayoutLMv2ForSequenceClassification'), ('layoutlmv3', 'LayoutLMv3ForSequenceClassification'), ('led', 'LEDForSequenceClassification'), ('lilt', 'LiltForSequenceClassification'), ('llama', 'LlamaForSequenceClassification'), ('longformer', 'LongformerForSequenceClassification'), ('luke', 'LukeForSequenceClassification'), ('markuplm', 'MarkupLMForSequenceClassification'), ('mbart', 'MBartForSequenceClassification'), ('mega', 'MegaForSequenceClassification'), ('megatron-bert', 'MegatronBertForSequenceClassification'), ('mistral', 'MistralForSequenceClassification'), ('mixtral', 'MixtralForSequenceClassification'), ('mobilebert', 'MobileBertForSequenceClassification'), ('mpnet', 'MPNetForSequenceClassification'), ('mpt', 'MptForSequenceClassification'), ('mra', 'MraForSequenceClassification'), ('mt5', 'MT5ForSequenceClassification'), ('mvp', 'MvpForSequenceClassification'), ('nemotron', 'NemotronForSequenceClassification'), ('nezha', 'NezhaForSequenceClassification'), ('nystromformer', 'NystromformerForSequenceClassification'), ('open-llama', 'OpenLlamaForSequenceClassification'), ('openai-gpt', 'OpenAIGPTForSequenceClassification'), ('opt', 'OPTForSequenceClassification'), ('perceiver', 'PerceiverForSequenceClassification'), ('persimmon', 'PersimmonForSequenceClassification'), ('phi', 'PhiForSequenceClassification'), ('phi3', 'Phi3ForSequenceClassification'), ('plbart', 'PLBartForSequenceClassification'), ('qdqbert', 'QDQBertForSequenceClassification'), ('qwen2', 'Qwen2ForSequenceClassification'), ('qwen2_moe', 'Qwen2MoeForSequenceClassification'), ('reformer', 'ReformerForSequenceClassification'), ('rembert', 'RemBertForSequenceClassification'), ('roberta', 'RobertaForSequenceClassification'), ('roberta-prelayernorm', 'RobertaPreLayerNormForSequenceClassification'), ('roc_bert', 'RoCBertForSequenceClassification'), ('roformer', 'RoFormerForSequenceClassification'), ('squeezebert', 'SqueezeBertForSequenceClassification'), ('stablelm', 'StableLmForSequenceClassification'), ('starcoder2', 'Starcoder2ForSequenceClassification'), 
('t5', 'T5ForSequenceClassification'), ('tapas', 'TapasForSequenceClassification'), ('transfo-xl', 'TransfoXLForSequenceClassification'), ('umt5', 'UMT5ForSequenceClassification'), ('xlm', 'XLMForSequenceClassification'), ('xlm-roberta', 'XLMRobertaForSequenceClassification'), ('xlm-roberta-xl', 'XLMRobertaXLForSequenceClassification'), ('xlnet', 'XLNetForSequenceClassification'), ('xmod', 'XmodForSequenceClassification'), ('yoso', 'YosoForSequenceClassification')]) MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict([('albert', 'AlbertForQuestionAnswering'), ('bart', 'BartForQuestionAnswering'), ('bert', 'BertForQuestionAnswering'), ('big_bird', 'BigBirdForQuestionAnswering'), ('bigbird_pegasus', 'BigBirdPegasusForQuestionAnswering'), ('bloom', 'BloomForQuestionAnswering'), ('camembert', 'CamembertForQuestionAnswering'), ('canine', 'CanineForQuestionAnswering'), ('convbert', 'ConvBertForQuestionAnswering'), ('data2vec-text', 'Data2VecTextForQuestionAnswering'), ('deberta', 'DebertaForQuestionAnswering'), ('deberta-v2', 'DebertaV2ForQuestionAnswering'), ('distilbert', 'DistilBertForQuestionAnswering'), ('electra', 'ElectraForQuestionAnswering'), ('ernie', 'ErnieForQuestionAnswering'), ('ernie_m', 'ErnieMForQuestionAnswering'), ('falcon', 'FalconForQuestionAnswering'), ('flaubert', 'FlaubertForQuestionAnsweringSimple'), ('fnet', 'FNetForQuestionAnswering'), ('funnel', 'FunnelForQuestionAnswering'), ('gpt2', 'GPT2ForQuestionAnswering'), ('gpt_neo', 'GPTNeoForQuestionAnswering'), ('gpt_neox', 'GPTNeoXForQuestionAnswering'), ('gptj', 'GPTJForQuestionAnswering'), ('ibert', 'IBertForQuestionAnswering'), ('layoutlmv2', 'LayoutLMv2ForQuestionAnswering'), ('layoutlmv3', 'LayoutLMv3ForQuestionAnswering'), ('led', 'LEDForQuestionAnswering'), ('lilt', 'LiltForQuestionAnswering'), ('llama', 'LlamaForQuestionAnswering'), ('longformer', 'LongformerForQuestionAnswering'), ('luke', 'LukeForQuestionAnswering'), ('lxmert', 'LxmertForQuestionAnswering'), ('markuplm', 'MarkupLMForQuestionAnswering'), ('mbart', 'MBartForQuestionAnswering'), ('mega', 'MegaForQuestionAnswering'), ('megatron-bert', 'MegatronBertForQuestionAnswering'), ('mobilebert', 'MobileBertForQuestionAnswering'), ('mpnet', 'MPNetForQuestionAnswering'), ('mpt', 'MptForQuestionAnswering'), ('mra', 'MraForQuestionAnswering'), ('mt5', 'MT5ForQuestionAnswering'), ('mvp', 'MvpForQuestionAnswering'), ('nemotron', 'NemotronForQuestionAnswering'), ('nezha', 'NezhaForQuestionAnswering'), ('nystromformer', 'NystromformerForQuestionAnswering'), ('opt', 'OPTForQuestionAnswering'), ('qdqbert', 'QDQBertForQuestionAnswering'), ('reformer', 'ReformerForQuestionAnswering'), ('rembert', 'RemBertForQuestionAnswering'), ('roberta', 'RobertaForQuestionAnswering'), ('roberta-prelayernorm', 'RobertaPreLayerNormForQuestionAnswering'), ('roc_bert', 'RoCBertForQuestionAnswering'), ('roformer', 'RoFormerForQuestionAnswering'), ('splinter', 'SplinterForQuestionAnswering'), ('squeezebert', 'SqueezeBertForQuestionAnswering'), ('t5', 'T5ForQuestionAnswering'), ('umt5', 'UMT5ForQuestionAnswering'), ('xlm', 'XLMForQuestionAnsweringSimple'), ('xlm-roberta', 'XLMRobertaForQuestionAnswering'), ('xlm-roberta-xl', 'XLMRobertaXLForQuestionAnswering'), ('xlnet', 'XLNetForQuestionAnsweringSimple'), ('xmod', 'XmodForQuestionAnswering'), ('yoso', 'YosoForQuestionAnswering')]) MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict([('tapas', 'TapasForQuestionAnswering')]) MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict([('blip', 'BlipForQuestionAnswering'), 
('blip-2', 'Blip2ForConditionalGeneration'), ('vilt', 'ViltForQuestionAnswering')]) MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict([('layoutlm', 'LayoutLMForQuestionAnswering'), ('layoutlmv2', 'LayoutLMv2ForQuestionAnswering'), ('layoutlmv3', 'LayoutLMv3ForQuestionAnswering')]) MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('albert', 'AlbertForTokenClassification'), ('bert', 'BertForTokenClassification'), ('big_bird', 'BigBirdForTokenClassification'), ('biogpt', 'BioGptForTokenClassification'), ('bloom', 'BloomForTokenClassification'), ('bros', 'BrosForTokenClassification'), ('camembert', 'CamembertForTokenClassification'), ('canine', 'CanineForTokenClassification'), ('convbert', 'ConvBertForTokenClassification'), ('data2vec-text', 'Data2VecTextForTokenClassification'), ('deberta', 'DebertaForTokenClassification'), ('deberta-v2', 'DebertaV2ForTokenClassification'), ('distilbert', 'DistilBertForTokenClassification'), ('electra', 'ElectraForTokenClassification'), ('ernie', 'ErnieForTokenClassification'), ('ernie_m', 'ErnieMForTokenClassification'), ('esm', 'EsmForTokenClassification'), ('falcon', 'FalconForTokenClassification'), ('flaubert', 'FlaubertForTokenClassification'), ('fnet', 'FNetForTokenClassification'), ('funnel', 'FunnelForTokenClassification'), ('gemma', 'GemmaForTokenClassification'), ('gemma2', 'Gemma2ForTokenClassification'), ('gpt-sw3', 'GPT2ForTokenClassification'), ('gpt2', 'GPT2ForTokenClassification'), ('gpt_bigcode', 'GPTBigCodeForTokenClassification'), ('gpt_neo', 'GPTNeoForTokenClassification'), ('gpt_neox', 'GPTNeoXForTokenClassification'), ('ibert', 'IBertForTokenClassification'), ('layoutlm', 'LayoutLMForTokenClassification'), ('layoutlmv2', 'LayoutLMv2ForTokenClassification'), ('layoutlmv3', 'LayoutLMv3ForTokenClassification'), ('lilt', 'LiltForTokenClassification'), ('llama', 'LlamaForTokenClassification'), ('longformer', 'LongformerForTokenClassification'), ('luke', 'LukeForTokenClassification'), ('markuplm', 'MarkupLMForTokenClassification'), ('mega', 'MegaForTokenClassification'), ('megatron-bert', 'MegatronBertForTokenClassification'), ('mistral', 'MistralForTokenClassification'), ('mixtral', 'MixtralForTokenClassification'), ('mobilebert', 'MobileBertForTokenClassification'), ('mpnet', 'MPNetForTokenClassification'), ('mpt', 'MptForTokenClassification'), ('mra', 'MraForTokenClassification'), ('mt5', 'MT5ForTokenClassification'), ('nemotron', 'NemotronForTokenClassification'), ('nezha', 'NezhaForTokenClassification'), ('nystromformer', 'NystromformerForTokenClassification'), ('persimmon', 'PersimmonForTokenClassification'), ('phi', 'PhiForTokenClassification'), ('phi3', 'Phi3ForTokenClassification'), ('qdqbert', 'QDQBertForTokenClassification'), ('qwen2', 'Qwen2ForTokenClassification'), ('qwen2_moe', 'Qwen2MoeForTokenClassification'), ('rembert', 'RemBertForTokenClassification'), ('roberta', 'RobertaForTokenClassification'), ('roberta-prelayernorm', 'RobertaPreLayerNormForTokenClassification'), ('roc_bert', 'RoCBertForTokenClassification'), ('roformer', 'RoFormerForTokenClassification'), ('squeezebert', 'SqueezeBertForTokenClassification'), ('stablelm', 'StableLmForTokenClassification'), ('starcoder2', 'Starcoder2ForTokenClassification'), ('t5', 'T5ForTokenClassification'), ('umt5', 'UMT5ForTokenClassification'), ('xlm', 'XLMForTokenClassification'), ('xlm-roberta', 'XLMRobertaForTokenClassification'), ('xlm-roberta-xl', 'XLMRobertaXLForTokenClassification'), ('xlnet', 'XLNetForTokenClassification'), ('xmod', 
'XmodForTokenClassification'), ('yoso', 'YosoForTokenClassification')]) MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict([('albert', 'AlbertForMultipleChoice'), ('bert', 'BertForMultipleChoice'), ('big_bird', 'BigBirdForMultipleChoice'), ('camembert', 'CamembertForMultipleChoice'), ('canine', 'CanineForMultipleChoice'), ('convbert', 'ConvBertForMultipleChoice'), ('data2vec-text', 'Data2VecTextForMultipleChoice'), ('deberta-v2', 'DebertaV2ForMultipleChoice'), ('distilbert', 'DistilBertForMultipleChoice'), ('electra', 'ElectraForMultipleChoice'), ('ernie', 'ErnieForMultipleChoice'), ('ernie_m', 'ErnieMForMultipleChoice'), ('flaubert', 'FlaubertForMultipleChoice'), ('fnet', 'FNetForMultipleChoice'), ('funnel', 'FunnelForMultipleChoice'), ('ibert', 'IBertForMultipleChoice'), ('longformer', 'LongformerForMultipleChoice'), ('luke', 'LukeForMultipleChoice'), ('mega', 'MegaForMultipleChoice'), ('megatron-bert', 'MegatronBertForMultipleChoice'), ('mobilebert', 'MobileBertForMultipleChoice'), ('mpnet', 'MPNetForMultipleChoice'), ('mra', 'MraForMultipleChoice'), ('nezha', 'NezhaForMultipleChoice'), ('nystromformer', 'NystromformerForMultipleChoice'), ('qdqbert', 'QDQBertForMultipleChoice'), ('rembert', 'RemBertForMultipleChoice'), ('roberta', 'RobertaForMultipleChoice'), ('roberta-prelayernorm', 'RobertaPreLayerNormForMultipleChoice'), ('roc_bert', 'RoCBertForMultipleChoice'), ('roformer', 'RoFormerForMultipleChoice'), ('squeezebert', 'SqueezeBertForMultipleChoice'), ('xlm', 'XLMForMultipleChoice'), ('xlm-roberta', 'XLMRobertaForMultipleChoice'), ('xlm-roberta-xl', 'XLMRobertaXLForMultipleChoice'), ('xlnet', 'XLNetForMultipleChoice'), ('xmod', 'XmodForMultipleChoice'), ('yoso', 'YosoForMultipleChoice')]) MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict([('bert', 'BertForNextSentencePrediction'), ('ernie', 'ErnieForNextSentencePrediction'), ('fnet', 'FNetForNextSentencePrediction'), ('megatron-bert', 'MegatronBertForNextSentencePrediction'), ('mobilebert', 'MobileBertForNextSentencePrediction'), ('nezha', 'NezhaForNextSentencePrediction'), ('qdqbert', 'QDQBertForNextSentencePrediction')]) MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('audio-spectrogram-transformer', 'ASTForAudioClassification'), ('data2vec-audio', 'Data2VecAudioForSequenceClassification'), ('hubert', 'HubertForSequenceClassification'), ('sew', 'SEWForSequenceClassification'), ('sew-d', 'SEWDForSequenceClassification'), ('unispeech', 'UniSpeechForSequenceClassification'), ('unispeech-sat', 'UniSpeechSatForSequenceClassification'), ('wav2vec2', 'Wav2Vec2ForSequenceClassification'), ('wav2vec2-bert', 'Wav2Vec2BertForSequenceClassification'), ('wav2vec2-conformer', 'Wav2Vec2ConformerForSequenceClassification'), ('wavlm', 'WavLMForSequenceClassification'), ('whisper', 'WhisperForAudioClassification')]) MODEL_FOR_CTC_MAPPING_NAMES = OrderedDict([('data2vec-audio', 'Data2VecAudioForCTC'), ('hubert', 'HubertForCTC'), ('mctct', 'MCTCTForCTC'), ('sew', 'SEWForCTC'), ('sew-d', 'SEWDForCTC'), ('unispeech', 'UniSpeechForCTC'), ('unispeech-sat', 'UniSpeechSatForCTC'), ('wav2vec2', 'Wav2Vec2ForCTC'), ('wav2vec2-bert', 'Wav2Vec2BertForCTC'), ('wav2vec2-conformer', 'Wav2Vec2ConformerForCTC'), ('wavlm', 'WavLMForCTC')]) MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('data2vec-audio', 'Data2VecAudioForAudioFrameClassification'), ('unispeech-sat', 'UniSpeechSatForAudioFrameClassification'), ('wav2vec2', 'Wav2Vec2ForAudioFrameClassification'), ('wav2vec2-bert', 
'Wav2Vec2BertForAudioFrameClassification'), ('wav2vec2-conformer', 'Wav2Vec2ConformerForAudioFrameClassification'), ('wavlm', 'WavLMForAudioFrameClassification')]) MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES = OrderedDict([('data2vec-audio', 'Data2VecAudioForXVector'), ('unispeech-sat', 'UniSpeechSatForXVector'), ('wav2vec2', 'Wav2Vec2ForXVector'), ('wav2vec2-bert', 'Wav2Vec2BertForXVector'), ('wav2vec2-conformer', 'Wav2Vec2ConformerForXVector'), ('wavlm', 'WavLMForXVector')]) MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES = OrderedDict([('fastspeech2_conformer', 'FastSpeech2ConformerModel'), ('speecht5', 'SpeechT5ForTextToSpeech')]) MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES = OrderedDict([('bark', 'BarkModel'), ('fastspeech2_conformer', 'FastSpeech2ConformerWithHifiGan'), ('musicgen', 'MusicgenForConditionalGeneration'), ('musicgen_melody', 'MusicgenMelodyForConditionalGeneration'), ('seamless_m4t', 'SeamlessM4TForTextToSpeech'), ('seamless_m4t_v2', 'SeamlessM4Tv2ForTextToSpeech'), ('vits', 'VitsModel')]) MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('align', 'AlignModel'), ('altclip', 'AltCLIPModel'), ('blip', 'BlipModel'), ('blip-2', 'Blip2ForImageTextRetrieval'), ('chinese_clip', 'ChineseCLIPModel'), ('clip', 'CLIPModel'), ('clipseg', 'CLIPSegModel'), ('siglip', 'SiglipModel')]) MODEL_FOR_BACKBONE_MAPPING_NAMES = OrderedDict([('beit', 'BeitBackbone'), ('bit', 'BitBackbone'), ('convnext', 'ConvNextBackbone'), ('convnextv2', 'ConvNextV2Backbone'), ('dinat', 'DinatBackbone'), ('dinov2', 'Dinov2Backbone'), ('focalnet', 'FocalNetBackbone'), ('hiera', 'HieraBackbone'), ('maskformer-swin', 'MaskFormerSwinBackbone'), ('nat', 'NatBackbone'), ('pvt_v2', 'PvtV2Backbone'), ('resnet', 'ResNetBackbone'), ('rt_detr_resnet', 'RTDetrResNetBackbone'), ('swin', 'SwinBackbone'), ('swinv2', 'Swinv2Backbone'), ('timm_backbone', 'TimmBackbone'), ('vitdet', 'VitDetBackbone')]) MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict([('sam', 'SamModel')]) MODEL_FOR_KEYPOINT_DETECTION_MAPPING_NAMES = OrderedDict([('superpoint', 'SuperPointForKeypointDetection')]) MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict([('albert', 'AlbertModel'), ('bert', 'BertModel'), ('big_bird', 'BigBirdModel'), ('data2vec-text', 'Data2VecTextModel'), ('deberta', 'DebertaModel'), ('deberta-v2', 'DebertaV2Model'), ('distilbert', 'DistilBertModel'), ('electra', 'ElectraModel'), ('flaubert', 'FlaubertModel'), ('ibert', 'IBertModel'), ('longformer', 'LongformerModel'), ('mobilebert', 'MobileBertModel'), ('mt5', 'MT5EncoderModel'), ('nystromformer', 'NystromformerModel'), ('reformer', 'ReformerModel'), ('rembert', 'RemBertModel'), ('roberta', 'RobertaModel'), ('roberta-prelayernorm', 'RobertaPreLayerNormModel'), ('roc_bert', 'RoCBertModel'), ('roformer', 'RoFormerModel'), ('squeezebert', 'SqueezeBertModel'), ('t5', 'T5EncoderModel'), ('umt5', 'UMT5EncoderModel'), ('xlm', 'XLMModel'), ('xlm-roberta', 'XLMRobertaModel'), ('xlm-roberta-xl', 'XLMRobertaXLModel')]) MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('patchtsmixer', 'PatchTSMixerForTimeSeriesClassification'), ('patchtst', 'PatchTSTForClassification')]) MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES = OrderedDict([('patchtsmixer', 'PatchTSMixerForRegression'), ('patchtst', 'PatchTSTForRegression')]) MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = OrderedDict([('swin2sr', 'Swin2SRForImageSuperResolution')]) MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES) MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
MODEL_FOR_PRETRAINING_MAPPING_NAMES) MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_LM_HEAD_MAPPING_NAMES) MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES) MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES) MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES) MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES) MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES) MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES) MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES) MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES) MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES) MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES) MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES) MODEL_FOR_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_MAPPING_NAMES) MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES) MODEL_FOR_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES) MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES) MODEL_FOR_DEPTH_ESTIMATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES) MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES) MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES) MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES) MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES) MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES) MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES) MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES) MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES) MODEL_FOR_CTC_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES) MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES) MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES) MODEL_FOR_AUDIO_XVECTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES) MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES) MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES) MODEL_FOR_BACKBONE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES) MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASK_GENERATION_MAPPING_NAMES) MODEL_FOR_KEYPOINT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_KEYPOINT_DETECTION_MAPPING_NAMES) MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES) MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES) MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES) MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES) class AutoModelForMaskGeneration(_BaseAutoModelClass): _model_mapping = MODEL_FOR_MASK_GENERATION_MAPPING class AutoModelForKeypointDetection(_BaseAutoModelClass): _model_mapping = MODEL_FOR_KEYPOINT_DETECTION_MAPPING class AutoModelForTextEncoding(_BaseAutoModelClass): _model_mapping = MODEL_FOR_TEXT_ENCODING_MAPPING class AutoModelForImageToImage(_BaseAutoModelClass): _model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING class AutoModel(_BaseAutoModelClass): _model_mapping = MODEL_MAPPING AutoModel = auto_class_update(AutoModel) class AutoModelForPreTraining(_BaseAutoModelClass): _model_mapping = MODEL_FOR_PRETRAINING_MAPPING AutoModelForPreTraining = auto_class_update(AutoModelForPreTraining, head_doc='pretraining') class _AutoModelWithLMHead(_BaseAutoModelClass): _model_mapping = MODEL_WITH_LM_HEAD_MAPPING _AutoModelWithLMHead = auto_class_update(_AutoModelWithLMHead, head_doc='language modeling') class AutoModelForCausalLM(_BaseAutoModelClass): _model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING AutoModelForCausalLM = auto_class_update(AutoModelForCausalLM, head_doc='causal language modeling') class AutoModelForMaskedLM(_BaseAutoModelClass): _model_mapping = MODEL_FOR_MASKED_LM_MAPPING AutoModelForMaskedLM = auto_class_update(AutoModelForMaskedLM, head_doc='masked language modeling') class AutoModelForSeq2SeqLM(_BaseAutoModelClass): _model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING AutoModelForSeq2SeqLM = auto_class_update(AutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='google-t5/t5-base') class AutoModelForSequenceClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING AutoModelForSequenceClassification = auto_class_update(AutoModelForSequenceClassification, head_doc='sequence classification') class AutoModelForQuestionAnswering(_BaseAutoModelClass): _model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING AutoModelForQuestionAnswering = auto_class_update(AutoModelForQuestionAnswering, head_doc='question answering') class AutoModelForTableQuestionAnswering(_BaseAutoModelClass): _model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING 
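# --- Editor's illustrative sketch (not part of the upstream module) ----------
# The classes defined around here all follow one pattern: each `AutoModelFor<Task>`
# holds a `_model_mapping` built by `_LazyAutoMapping`, which lazily maps a
# *config class* to the concrete task head without importing every model module,
# while `auto_class_update` only rewrites the class docstring for the task.
# A minimal usage sketch, assuming `transformers` is installed and the Hub is
# reachable; the checkpoint name below is purely an example, not something this
# module mandates.
from transformers import AutoConfig, AutoModelForSequenceClassification
from transformers.models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

config = AutoConfig.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
# The lazy mapping is keyed by the config class and resolves to the concrete head:
head_cls = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING[type(config)]
print(head_cls.__name__)  # e.g. DistilBertForSequenceClassification
# `from_config` instantiates that head with randomly initialized weights; the
# `from_pretrained` path performs the same resolution before loading weights.
model = AutoModelForSequenceClassification.from_config(config)
# -----------------------------------------------------------------------------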
AutoModelForTableQuestionAnswering = auto_class_update(AutoModelForTableQuestionAnswering, head_doc='table question answering', checkpoint_for_example='google/tapas-base-finetuned-wtq') class AutoModelForVisualQuestionAnswering(_BaseAutoModelClass): _model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING AutoModelForVisualQuestionAnswering = auto_class_update(AutoModelForVisualQuestionAnswering, head_doc='visual question answering', checkpoint_for_example='dandelin/vilt-b32-finetuned-vqa') class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): _model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING AutoModelForDocumentQuestionAnswering = auto_class_update(AutoModelForDocumentQuestionAnswering, head_doc='document question answering', checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3') class AutoModelForTokenClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING AutoModelForTokenClassification = auto_class_update(AutoModelForTokenClassification, head_doc='token classification') class AutoModelForMultipleChoice(_BaseAutoModelClass): _model_mapping = MODEL_FOR_MULTIPLE_CHOICE_MAPPING AutoModelForMultipleChoice = auto_class_update(AutoModelForMultipleChoice, head_doc='multiple choice') class AutoModelForNextSentencePrediction(_BaseAutoModelClass): _model_mapping = MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING AutoModelForNextSentencePrediction = auto_class_update(AutoModelForNextSentencePrediction, head_doc='next sentence prediction') class AutoModelForImageClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING AutoModelForImageClassification = auto_class_update(AutoModelForImageClassification, head_doc='image classification') class AutoModelForZeroShotImageClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING AutoModelForZeroShotImageClassification = auto_class_update(AutoModelForZeroShotImageClassification, head_doc='zero-shot image classification') class AutoModelForImageSegmentation(_BaseAutoModelClass): _model_mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING AutoModelForImageSegmentation = auto_class_update(AutoModelForImageSegmentation, head_doc='image segmentation') class AutoModelForSemanticSegmentation(_BaseAutoModelClass): _model_mapping = MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING AutoModelForSemanticSegmentation = auto_class_update(AutoModelForSemanticSegmentation, head_doc='semantic segmentation') class AutoModelForUniversalSegmentation(_BaseAutoModelClass): _model_mapping = MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING AutoModelForUniversalSegmentation = auto_class_update(AutoModelForUniversalSegmentation, head_doc='universal image segmentation') class AutoModelForInstanceSegmentation(_BaseAutoModelClass): _model_mapping = MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING AutoModelForInstanceSegmentation = auto_class_update(AutoModelForInstanceSegmentation, head_doc='instance segmentation') class AutoModelForObjectDetection(_BaseAutoModelClass): _model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING AutoModelForObjectDetection = auto_class_update(AutoModelForObjectDetection, head_doc='object detection') class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass): _model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING AutoModelForZeroShotObjectDetection = auto_class_update(AutoModelForZeroShotObjectDetection, head_doc='zero-shot object detection') class AutoModelForDepthEstimation(_BaseAutoModelClass): _model_mapping = 
MODEL_FOR_DEPTH_ESTIMATION_MAPPING AutoModelForDepthEstimation = auto_class_update(AutoModelForDepthEstimation, head_doc='depth estimation') class AutoModelForVideoClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING AutoModelForVideoClassification = auto_class_update(AutoModelForVideoClassification, head_doc='video classification') class AutoModelForVision2Seq(_BaseAutoModelClass): _model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING AutoModelForVision2Seq = auto_class_update(AutoModelForVision2Seq, head_doc='vision-to-text modeling') class AutoModelForAudioClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING AutoModelForAudioClassification = auto_class_update(AutoModelForAudioClassification, head_doc='audio classification') class AutoModelForCTC(_BaseAutoModelClass): _model_mapping = MODEL_FOR_CTC_MAPPING AutoModelForCTC = auto_class_update(AutoModelForCTC, head_doc='connectionist temporal classification') class AutoModelForSpeechSeq2Seq(_BaseAutoModelClass): _model_mapping = MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING AutoModelForSpeechSeq2Seq = auto_class_update(AutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling') class AutoModelForAudioFrameClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING AutoModelForAudioFrameClassification = auto_class_update(AutoModelForAudioFrameClassification, head_doc='audio frame (token) classification') class AutoModelForAudioXVector(_BaseAutoModelClass): _model_mapping = MODEL_FOR_AUDIO_XVECTOR_MAPPING class AutoModelForTextToSpectrogram(_BaseAutoModelClass): _model_mapping = MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING class AutoModelForTextToWaveform(_BaseAutoModelClass): _model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING class AutoBackbone(_BaseAutoBackboneClass): _model_mapping = MODEL_FOR_BACKBONE_MAPPING AutoModelForAudioXVector = auto_class_update(AutoModelForAudioXVector, head_doc='audio retrieval via x-vector') class AutoModelForMaskedImageModeling(_BaseAutoModelClass): _model_mapping = MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING AutoModelForMaskedImageModeling = auto_class_update(AutoModelForMaskedImageModeling, head_doc='masked image modeling') class AutoModelWithLMHead(_AutoModelWithLMHead): @classmethod def from_config(cls, config): warnings.warn('The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use `AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and `AutoModelForSeq2SeqLM` for encoder-decoder models.', FutureWarning) return super().from_config(config) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): warnings.warn('The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. 
Please use `AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and `AutoModelForSeq2SeqLM` for encoder-decoder models.', FutureWarning) return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) # File: transformers-main/src/transformers/models/auto/modeling_flax_auto.py """""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES logger = logging.get_logger(__name__) FLAX_MODEL_MAPPING_NAMES = OrderedDict([('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('bloom', 'FlaxBloomModel'), ('clip', 'FlaxCLIPModel'), ('dinov2', 'FlaxDinov2Model'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gemma', 'FlaxGemmaModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('llama', 'FlaxLlamaModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mistral', 'FlaxMistralModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel')]) FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict([('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM')]) FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict([('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM')]) FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict([('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 
'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration')]) FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('beit', 'FlaxBeitForImageClassification'), ('dinov2', 'FlaxDinov2ForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification')]) FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict([('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel')]) FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict([('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('bloom', 'FlaxBloomForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gemma', 'FlaxGemmaForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('llama', 'FlaxLlamaForCausalLM'), ('mistral', 'FlaxMistralForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM')]) FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification')]) FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict([('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering')]) FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification')]) FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict([('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 
'FlaxXLMRobertaForMultipleChoice')]) FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict([('bert', 'FlaxBertForNextSentencePrediction')]) FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict([('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration')]) FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('whisper', 'FlaxWhisperForAudioClassification')]) FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES) FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES) FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES) FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES) FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES) FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES) FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES) FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES) FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES) class FlaxAutoModel(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_MAPPING FlaxAutoModel = auto_class_update(FlaxAutoModel) class FlaxAutoModelForPreTraining(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class FlaxAutoModelForCausalLM(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class FlaxAutoModelForMaskedLM(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING FlaxAutoModelForSeq2SeqLM = auto_class_update(FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='google-t5/t5-base') class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING FlaxAutoModelForSequenceClassification = auto_class_update(FlaxAutoModelForSequenceClassification, head_doc='sequence classification') class 
FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class FlaxAutoModelForTokenClassification(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING FlaxAutoModelForTokenClassification = auto_class_update(FlaxAutoModelForTokenClassification, head_doc='token classification') class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING FlaxAutoModelForNextSentencePrediction = auto_class_update(FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction') class FlaxAutoModelForImageClassification(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING FlaxAutoModelForImageClassification = auto_class_update(FlaxAutoModelForImageClassification, head_doc='image classification') class FlaxAutoModelForVision2Seq(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling') class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass): _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING FlaxAutoModelForSpeechSeq2Seq = auto_class_update(FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling') # File: transformers-main/src/transformers/models/auto/modeling_tf_auto.py """""" import warnings from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES logger = logging.get_logger(__name__) TF_MODEL_MAPPING_NAMES = OrderedDict([('albert', 'TFAlbertModel'), ('bart', 'TFBartModel'), ('bert', 'TFBertModel'), ('blenderbot', 'TFBlenderbotModel'), ('blenderbot-small', 'TFBlenderbotSmallModel'), ('blip', 'TFBlipModel'), ('camembert', 'TFCamembertModel'), ('clip', 'TFCLIPModel'), ('convbert', 'TFConvBertModel'), ('convnext', 'TFConvNextModel'), ('convnextv2', 'TFConvNextV2Model'), ('ctrl', 'TFCTRLModel'), ('cvt', 'TFCvtModel'), ('data2vec-vision', 'TFData2VecVisionModel'), ('deberta', 'TFDebertaModel'), ('deberta-v2', 'TFDebertaV2Model'), ('deit', 'TFDeiTModel'), ('distilbert', 'TFDistilBertModel'), ('dpr', 'TFDPRQuestionEncoder'), ('efficientformer', 'TFEfficientFormerModel'), ('electra', 'TFElectraModel'), ('esm', 'TFEsmModel'), ('flaubert', 'TFFlaubertModel'), ('funnel', ('TFFunnelModel', 'TFFunnelBaseModel')), ('gpt-sw3', 'TFGPT2Model'), ('gpt2', 'TFGPT2Model'), ('gptj', 'TFGPTJModel'), ('groupvit', 'TFGroupViTModel'), ('hubert', 'TFHubertModel'), ('idefics', 'TFIdeficsModel'), ('layoutlm', 'TFLayoutLMModel'), ('layoutlmv3', 'TFLayoutLMv3Model'), ('led', 'TFLEDModel'), ('longformer', 'TFLongformerModel'), ('lxmert', 'TFLxmertModel'), ('marian', 'TFMarianModel'), ('mbart', 'TFMBartModel'), ('mistral', 'TFMistralModel'), ('mobilebert', 'TFMobileBertModel'), ('mobilevit', 'TFMobileViTModel'), ('mpnet', 'TFMPNetModel'), ('mt5', 'TFMT5Model'), ('openai-gpt', 'TFOpenAIGPTModel'), ('opt', 'TFOPTModel'), ('pegasus', 'TFPegasusModel'), ('regnet', 'TFRegNetModel'), ('rembert', 
'TFRemBertModel'), ('resnet', 'TFResNetModel'), ('roberta', 'TFRobertaModel'), ('roberta-prelayernorm', 'TFRobertaPreLayerNormModel'), ('roformer', 'TFRoFormerModel'), ('sam', 'TFSamModel'), ('segformer', 'TFSegformerModel'), ('speech_to_text', 'TFSpeech2TextModel'), ('swiftformer', 'TFSwiftFormerModel'), ('swin', 'TFSwinModel'), ('t5', 'TFT5Model'), ('tapas', 'TFTapasModel'), ('transfo-xl', 'TFTransfoXLModel'), ('vision-text-dual-encoder', 'TFVisionTextDualEncoderModel'), ('vit', 'TFViTModel'), ('vit_mae', 'TFViTMAEModel'), ('wav2vec2', 'TFWav2Vec2Model'), ('whisper', 'TFWhisperModel'), ('xglm', 'TFXGLMModel'), ('xlm', 'TFXLMModel'), ('xlm-roberta', 'TFXLMRobertaModel'), ('xlnet', 'TFXLNetModel')]) TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict([('albert', 'TFAlbertForPreTraining'), ('bart', 'TFBartForConditionalGeneration'), ('bert', 'TFBertForPreTraining'), ('camembert', 'TFCamembertForMaskedLM'), ('ctrl', 'TFCTRLLMHeadModel'), ('distilbert', 'TFDistilBertForMaskedLM'), ('electra', 'TFElectraForPreTraining'), ('flaubert', 'TFFlaubertWithLMHeadModel'), ('funnel', 'TFFunnelForPreTraining'), ('gpt-sw3', 'TFGPT2LMHeadModel'), ('gpt2', 'TFGPT2LMHeadModel'), ('idefics', 'TFIdeficsForVisionText2Text'), ('layoutlm', 'TFLayoutLMForMaskedLM'), ('lxmert', 'TFLxmertForPreTraining'), ('mobilebert', 'TFMobileBertForPreTraining'), ('mpnet', 'TFMPNetForMaskedLM'), ('openai-gpt', 'TFOpenAIGPTLMHeadModel'), ('roberta', 'TFRobertaForMaskedLM'), ('roberta-prelayernorm', 'TFRobertaPreLayerNormForMaskedLM'), ('t5', 'TFT5ForConditionalGeneration'), ('tapas', 'TFTapasForMaskedLM'), ('transfo-xl', 'TFTransfoXLLMHeadModel'), ('vit_mae', 'TFViTMAEForPreTraining'), ('xlm', 'TFXLMWithLMHeadModel'), ('xlm-roberta', 'TFXLMRobertaForMaskedLM'), ('xlnet', 'TFXLNetLMHeadModel')]) TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict([('albert', 'TFAlbertForMaskedLM'), ('bart', 'TFBartForConditionalGeneration'), ('bert', 'TFBertForMaskedLM'), ('camembert', 'TFCamembertForMaskedLM'), ('convbert', 'TFConvBertForMaskedLM'), ('ctrl', 'TFCTRLLMHeadModel'), ('distilbert', 'TFDistilBertForMaskedLM'), ('electra', 'TFElectraForMaskedLM'), ('esm', 'TFEsmForMaskedLM'), ('flaubert', 'TFFlaubertWithLMHeadModel'), ('funnel', 'TFFunnelForMaskedLM'), ('gpt-sw3', 'TFGPT2LMHeadModel'), ('gpt2', 'TFGPT2LMHeadModel'), ('gptj', 'TFGPTJForCausalLM'), ('layoutlm', 'TFLayoutLMForMaskedLM'), ('led', 'TFLEDForConditionalGeneration'), ('longformer', 'TFLongformerForMaskedLM'), ('marian', 'TFMarianMTModel'), ('mobilebert', 'TFMobileBertForMaskedLM'), ('mpnet', 'TFMPNetForMaskedLM'), ('openai-gpt', 'TFOpenAIGPTLMHeadModel'), ('rembert', 'TFRemBertForMaskedLM'), ('roberta', 'TFRobertaForMaskedLM'), ('roberta-prelayernorm', 'TFRobertaPreLayerNormForMaskedLM'), ('roformer', 'TFRoFormerForMaskedLM'), ('speech_to_text', 'TFSpeech2TextForConditionalGeneration'), ('t5', 'TFT5ForConditionalGeneration'), ('tapas', 'TFTapasForMaskedLM'), ('transfo-xl', 'TFTransfoXLLMHeadModel'), ('whisper', 'TFWhisperForConditionalGeneration'), ('xlm', 'TFXLMWithLMHeadModel'), ('xlm-roberta', 'TFXLMRobertaForMaskedLM'), ('xlnet', 'TFXLNetLMHeadModel')]) TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict([('bert', 'TFBertLMHeadModel'), ('camembert', 'TFCamembertForCausalLM'), ('ctrl', 'TFCTRLLMHeadModel'), ('gpt-sw3', 'TFGPT2LMHeadModel'), ('gpt2', 'TFGPT2LMHeadModel'), ('gptj', 'TFGPTJForCausalLM'), ('mistral', 'TFMistralForCausalLM'), ('openai-gpt', 'TFOpenAIGPTLMHeadModel'), ('opt', 'TFOPTForCausalLM'), ('rembert', 'TFRemBertForCausalLM'), ('roberta', 
'TFRobertaForCausalLM'), ('roberta-prelayernorm', 'TFRobertaPreLayerNormForCausalLM'), ('roformer', 'TFRoFormerForCausalLM'), ('transfo-xl', 'TFTransfoXLLMHeadModel'), ('xglm', 'TFXGLMForCausalLM'), ('xlm', 'TFXLMWithLMHeadModel'), ('xlm-roberta', 'TFXLMRobertaForCausalLM'), ('xlnet', 'TFXLNetLMHeadModel')]) TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict([('deit', 'TFDeiTForMaskedImageModeling'), ('swin', 'TFSwinForMaskedImageModeling')]) TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('convnext', 'TFConvNextForImageClassification'), ('convnextv2', 'TFConvNextV2ForImageClassification'), ('cvt', 'TFCvtForImageClassification'), ('data2vec-vision', 'TFData2VecVisionForImageClassification'), ('deit', ('TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher')), ('efficientformer', ('TFEfficientFormerForImageClassification', 'TFEfficientFormerForImageClassificationWithTeacher')), ('mobilevit', 'TFMobileViTForImageClassification'), ('regnet', 'TFRegNetForImageClassification'), ('resnet', 'TFResNetForImageClassification'), ('segformer', 'TFSegformerForImageClassification'), ('swiftformer', 'TFSwiftFormerForImageClassification'), ('swin', 'TFSwinForImageClassification'), ('vit', 'TFViTForImageClassification')]) TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('blip', 'TFBlipModel'), ('clip', 'TFCLIPModel')]) TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict([('data2vec-vision', 'TFData2VecVisionForSemanticSegmentation'), ('mobilevit', 'TFMobileViTForSemanticSegmentation'), ('segformer', 'TFSegformerForSemanticSegmentation')]) TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict([('blip', 'TFBlipForConditionalGeneration'), ('vision-encoder-decoder', 'TFVisionEncoderDecoderModel')]) TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict([('albert', 'TFAlbertForMaskedLM'), ('bert', 'TFBertForMaskedLM'), ('camembert', 'TFCamembertForMaskedLM'), ('convbert', 'TFConvBertForMaskedLM'), ('deberta', 'TFDebertaForMaskedLM'), ('deberta-v2', 'TFDebertaV2ForMaskedLM'), ('distilbert', 'TFDistilBertForMaskedLM'), ('electra', 'TFElectraForMaskedLM'), ('esm', 'TFEsmForMaskedLM'), ('flaubert', 'TFFlaubertWithLMHeadModel'), ('funnel', 'TFFunnelForMaskedLM'), ('layoutlm', 'TFLayoutLMForMaskedLM'), ('longformer', 'TFLongformerForMaskedLM'), ('mobilebert', 'TFMobileBertForMaskedLM'), ('mpnet', 'TFMPNetForMaskedLM'), ('rembert', 'TFRemBertForMaskedLM'), ('roberta', 'TFRobertaForMaskedLM'), ('roberta-prelayernorm', 'TFRobertaPreLayerNormForMaskedLM'), ('roformer', 'TFRoFormerForMaskedLM'), ('tapas', 'TFTapasForMaskedLM'), ('xlm', 'TFXLMWithLMHeadModel'), ('xlm-roberta', 'TFXLMRobertaForMaskedLM')]) TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict([('bart', 'TFBartForConditionalGeneration'), ('blenderbot', 'TFBlenderbotForConditionalGeneration'), ('blenderbot-small', 'TFBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'TFEncoderDecoderModel'), ('led', 'TFLEDForConditionalGeneration'), ('marian', 'TFMarianMTModel'), ('mbart', 'TFMBartForConditionalGeneration'), ('mt5', 'TFMT5ForConditionalGeneration'), ('pegasus', 'TFPegasusForConditionalGeneration'), ('t5', 'TFT5ForConditionalGeneration')]) TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict([('speech_to_text', 'TFSpeech2TextForConditionalGeneration'), ('whisper', 'TFWhisperForConditionalGeneration')]) TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('albert', 'TFAlbertForSequenceClassification'), ('bart', 
'TFBartForSequenceClassification'), ('bert', 'TFBertForSequenceClassification'), ('camembert', 'TFCamembertForSequenceClassification'), ('convbert', 'TFConvBertForSequenceClassification'), ('ctrl', 'TFCTRLForSequenceClassification'), ('deberta', 'TFDebertaForSequenceClassification'), ('deberta-v2', 'TFDebertaV2ForSequenceClassification'), ('distilbert', 'TFDistilBertForSequenceClassification'), ('electra', 'TFElectraForSequenceClassification'), ('esm', 'TFEsmForSequenceClassification'), ('flaubert', 'TFFlaubertForSequenceClassification'), ('funnel', 'TFFunnelForSequenceClassification'), ('gpt-sw3', 'TFGPT2ForSequenceClassification'), ('gpt2', 'TFGPT2ForSequenceClassification'), ('gptj', 'TFGPTJForSequenceClassification'), ('layoutlm', 'TFLayoutLMForSequenceClassification'), ('layoutlmv3', 'TFLayoutLMv3ForSequenceClassification'), ('longformer', 'TFLongformerForSequenceClassification'), ('mistral', 'TFMistralForSequenceClassification'), ('mobilebert', 'TFMobileBertForSequenceClassification'), ('mpnet', 'TFMPNetForSequenceClassification'), ('openai-gpt', 'TFOpenAIGPTForSequenceClassification'), ('rembert', 'TFRemBertForSequenceClassification'), ('roberta', 'TFRobertaForSequenceClassification'), ('roberta-prelayernorm', 'TFRobertaPreLayerNormForSequenceClassification'), ('roformer', 'TFRoFormerForSequenceClassification'), ('tapas', 'TFTapasForSequenceClassification'), ('transfo-xl', 'TFTransfoXLForSequenceClassification'), ('xlm', 'TFXLMForSequenceClassification'), ('xlm-roberta', 'TFXLMRobertaForSequenceClassification'), ('xlnet', 'TFXLNetForSequenceClassification')]) TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict([('albert', 'TFAlbertForQuestionAnswering'), ('bert', 'TFBertForQuestionAnswering'), ('camembert', 'TFCamembertForQuestionAnswering'), ('convbert', 'TFConvBertForQuestionAnswering'), ('deberta', 'TFDebertaForQuestionAnswering'), ('deberta-v2', 'TFDebertaV2ForQuestionAnswering'), ('distilbert', 'TFDistilBertForQuestionAnswering'), ('electra', 'TFElectraForQuestionAnswering'), ('flaubert', 'TFFlaubertForQuestionAnsweringSimple'), ('funnel', 'TFFunnelForQuestionAnswering'), ('gptj', 'TFGPTJForQuestionAnswering'), ('layoutlmv3', 'TFLayoutLMv3ForQuestionAnswering'), ('longformer', 'TFLongformerForQuestionAnswering'), ('mobilebert', 'TFMobileBertForQuestionAnswering'), ('mpnet', 'TFMPNetForQuestionAnswering'), ('rembert', 'TFRemBertForQuestionAnswering'), ('roberta', 'TFRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'TFRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'TFRoFormerForQuestionAnswering'), ('xlm', 'TFXLMForQuestionAnsweringSimple'), ('xlm-roberta', 'TFXLMRobertaForQuestionAnswering'), ('xlnet', 'TFXLNetForQuestionAnsweringSimple')]) TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('wav2vec2', 'TFWav2Vec2ForSequenceClassification')]) TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict([('layoutlm', 'TFLayoutLMForQuestionAnswering'), ('layoutlmv3', 'TFLayoutLMv3ForQuestionAnswering')]) TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict([('tapas', 'TFTapasForQuestionAnswering')]) TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict([('albert', 'TFAlbertForTokenClassification'), ('bert', 'TFBertForTokenClassification'), ('camembert', 'TFCamembertForTokenClassification'), ('convbert', 'TFConvBertForTokenClassification'), ('deberta', 'TFDebertaForTokenClassification'), ('deberta-v2', 'TFDebertaV2ForTokenClassification'), ('distilbert', 'TFDistilBertForTokenClassification'), ('electra', 
'TFElectraForTokenClassification'), ('esm', 'TFEsmForTokenClassification'), ('flaubert', 'TFFlaubertForTokenClassification'), ('funnel', 'TFFunnelForTokenClassification'), ('layoutlm', 'TFLayoutLMForTokenClassification'), ('layoutlmv3', 'TFLayoutLMv3ForTokenClassification'), ('longformer', 'TFLongformerForTokenClassification'), ('mobilebert', 'TFMobileBertForTokenClassification'), ('mpnet', 'TFMPNetForTokenClassification'), ('rembert', 'TFRemBertForTokenClassification'), ('roberta', 'TFRobertaForTokenClassification'), ('roberta-prelayernorm', 'TFRobertaPreLayerNormForTokenClassification'), ('roformer', 'TFRoFormerForTokenClassification'), ('xlm', 'TFXLMForTokenClassification'), ('xlm-roberta', 'TFXLMRobertaForTokenClassification'), ('xlnet', 'TFXLNetForTokenClassification')]) TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict([('albert', 'TFAlbertForMultipleChoice'), ('bert', 'TFBertForMultipleChoice'), ('camembert', 'TFCamembertForMultipleChoice'), ('convbert', 'TFConvBertForMultipleChoice'), ('deberta-v2', 'TFDebertaV2ForMultipleChoice'), ('distilbert', 'TFDistilBertForMultipleChoice'), ('electra', 'TFElectraForMultipleChoice'), ('flaubert', 'TFFlaubertForMultipleChoice'), ('funnel', 'TFFunnelForMultipleChoice'), ('longformer', 'TFLongformerForMultipleChoice'), ('mobilebert', 'TFMobileBertForMultipleChoice'), ('mpnet', 'TFMPNetForMultipleChoice'), ('rembert', 'TFRemBertForMultipleChoice'), ('roberta', 'TFRobertaForMultipleChoice'), ('roberta-prelayernorm', 'TFRobertaPreLayerNormForMultipleChoice'), ('roformer', 'TFRoFormerForMultipleChoice'), ('xlm', 'TFXLMForMultipleChoice'), ('xlm-roberta', 'TFXLMRobertaForMultipleChoice'), ('xlnet', 'TFXLNetForMultipleChoice')]) TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict([('bert', 'TFBertForNextSentencePrediction'), ('mobilebert', 'TFMobileBertForNextSentencePrediction')]) TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict([('sam', 'TFSamModel')]) TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict([('albert', 'TFAlbertModel'), ('bert', 'TFBertModel'), ('convbert', 'TFConvBertModel'), ('deberta', 'TFDebertaModel'), ('deberta-v2', 'TFDebertaV2Model'), ('distilbert', 'TFDistilBertModel'), ('electra', 'TFElectraModel'), ('flaubert', 'TFFlaubertModel'), ('longformer', 'TFLongformerModel'), ('mobilebert', 'TFMobileBertModel'), ('mt5', 'TFMT5EncoderModel'), ('rembert', 'TFRemBertModel'), ('roberta', 'TFRobertaModel'), ('roberta-prelayernorm', 'TFRobertaPreLayerNormModel'), ('roformer', 'TFRoFormerModel'), ('t5', 'TFT5EncoderModel'), ('xlm', 'TFXLMModel'), ('xlm-roberta', 'TFXLMRobertaModel')]) TF_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_MAPPING_NAMES) TF_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES) TF_MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES) TF_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES) TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES) TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES) TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES) TF_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) TF_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES) TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES) TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES) TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES) TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES) TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES) TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES) TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES) TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES) TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES) TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES) TF_MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES) TF_MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES) class TFAutoModelForMaskGeneration(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_MASK_GENERATION_MAPPING class TFAutoModelForTextEncoding(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_TEXT_ENCODING_MAPPING class TFAutoModel(_BaseAutoModelClass): _model_mapping = TF_MODEL_MAPPING TFAutoModel = auto_class_update(TFAutoModel) class TFAutoModelForAudioClassification(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING TFAutoModelForAudioClassification = auto_class_update(TFAutoModelForAudioClassification, head_doc='audio classification') class TFAutoModelForPreTraining(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_PRETRAINING_MAPPING TFAutoModelForPreTraining = auto_class_update(TFAutoModelForPreTraining, head_doc='pretraining') class _TFAutoModelWithLMHead(_BaseAutoModelClass): _model_mapping = TF_MODEL_WITH_LM_HEAD_MAPPING _TFAutoModelWithLMHead = auto_class_update(_TFAutoModelWithLMHead, head_doc='language modeling') class TFAutoModelForCausalLM(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING TFAutoModelForCausalLM = auto_class_update(TFAutoModelForCausalLM, head_doc='causal language modeling') class TFAutoModelForMaskedImageModeling(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING TFAutoModelForMaskedImageModeling = auto_class_update(TFAutoModelForMaskedImageModeling, head_doc='masked image modeling') class TFAutoModelForImageClassification(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING TFAutoModelForImageClassification = auto_class_update(TFAutoModelForImageClassification, head_doc='image classification') class 
TFAutoModelForZeroShotImageClassification(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING TFAutoModelForZeroShotImageClassification = auto_class_update(TFAutoModelForZeroShotImageClassification, head_doc='zero-shot image classification') class TFAutoModelForSemanticSegmentation(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING TFAutoModelForSemanticSegmentation = auto_class_update(TFAutoModelForSemanticSegmentation, head_doc='semantic segmentation') class TFAutoModelForVision2Seq(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING TFAutoModelForVision2Seq = auto_class_update(TFAutoModelForVision2Seq, head_doc='vision-to-text modeling') class TFAutoModelForMaskedLM(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING TFAutoModelForMaskedLM = auto_class_update(TFAutoModelForMaskedLM, head_doc='masked language modeling') class TFAutoModelForSeq2SeqLM(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING TFAutoModelForSeq2SeqLM = auto_class_update(TFAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='google-t5/t5-base') class TFAutoModelForSequenceClassification(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING TFAutoModelForSequenceClassification = auto_class_update(TFAutoModelForSequenceClassification, head_doc='sequence classification') class TFAutoModelForQuestionAnswering(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING TFAutoModelForQuestionAnswering = auto_class_update(TFAutoModelForQuestionAnswering, head_doc='question answering') class TFAutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING TFAutoModelForDocumentQuestionAnswering = auto_class_update(TFAutoModelForDocumentQuestionAnswering, head_doc='document question answering', checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3') class TFAutoModelForTableQuestionAnswering(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING TFAutoModelForTableQuestionAnswering = auto_class_update(TFAutoModelForTableQuestionAnswering, head_doc='table question answering', checkpoint_for_example='google/tapas-base-finetuned-wtq') class TFAutoModelForTokenClassification(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING TFAutoModelForTokenClassification = auto_class_update(TFAutoModelForTokenClassification, head_doc='token classification') class TFAutoModelForMultipleChoice(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING TFAutoModelForMultipleChoice = auto_class_update(TFAutoModelForMultipleChoice, head_doc='multiple choice') class TFAutoModelForNextSentencePrediction(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING TFAutoModelForNextSentencePrediction = auto_class_update(TFAutoModelForNextSentencePrediction, head_doc='next sentence prediction') class TFAutoModelForSpeechSeq2Seq(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING TFAutoModelForSpeechSeq2Seq = auto_class_update(TFAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling') class TFAutoModelWithLMHead(_TFAutoModelWithLMHead): @classmethod def from_config(cls, config): warnings.warn('The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. 
Please use `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.', FutureWarning) return super().from_config(config) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): warnings.warn('The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.', FutureWarning) return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) # File: transformers-main/src/transformers/models/auto/processing_auto.py """""" import importlib import inspect import json import os import warnings from collections import OrderedDict from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...image_processing_utils import ImageProcessingMixin from ...processing_utils import ProcessorMixin from ...tokenization_utils import TOKENIZER_CONFIG_FILE from ...utils import FEATURE_EXTRACTOR_NAME, PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings from .feature_extraction_auto import AutoFeatureExtractor from .image_processing_auto import AutoImageProcessor from .tokenization_auto import AutoTokenizer logger = logging.get_logger(__name__) PROCESSOR_MAPPING_NAMES = OrderedDict([('align', 'AlignProcessor'), ('altclip', 'AltCLIPProcessor'), ('bark', 'BarkProcessor'), ('blip', 'BlipProcessor'), ('blip-2', 'Blip2Processor'), ('bridgetower', 'BridgeTowerProcessor'), ('chameleon', 'ChameleonProcessor'), ('chinese_clip', 'ChineseCLIPProcessor'), ('clap', 'ClapProcessor'), ('clip', 'CLIPProcessor'), ('clipseg', 'CLIPSegProcessor'), ('clvp', 'ClvpProcessor'), ('flava', 'FlavaProcessor'), ('fuyu', 'FuyuProcessor'), ('git', 'GitProcessor'), ('grounding-dino', 'GroundingDinoProcessor'), ('groupvit', 'CLIPProcessor'), ('hubert', 'Wav2Vec2Processor'), ('idefics', 'IdeficsProcessor'), ('idefics2', 'Idefics2Processor'), ('instructblip', 'InstructBlipProcessor'), ('instructblipvideo', 'InstructBlipVideoProcessor'), ('kosmos-2', 'Kosmos2Processor'), ('layoutlmv2', 'LayoutLMv2Processor'), ('layoutlmv3', 'LayoutLMv3Processor'), ('llava', 'LlavaProcessor'), ('llava_next', 'LlavaNextProcessor'), ('llava_next_video', 'LlavaNextVideoProcessor'), ('llava_onevision', 'LlavaOnevisionProcessor'), ('markuplm', 'MarkupLMProcessor'), ('mctct', 'MCTCTProcessor'), ('mgp-str', 'MgpstrProcessor'), ('oneformer', 'OneFormerProcessor'), ('owlv2', 'Owlv2Processor'), ('owlvit', 'OwlViTProcessor'), ('paligemma', 'PaliGemmaProcessor'), ('pix2struct', 'Pix2StructProcessor'), ('pixtral', 'PixtralProcessor'), ('pop2piano', 'Pop2PianoProcessor'), ('qwen2_audio', 'Qwen2AudioProcessor'), ('qwen2_vl', 'Qwen2VLProcessor'), ('sam', 'SamProcessor'), ('seamless_m4t', 'SeamlessM4TProcessor'), ('sew', 'Wav2Vec2Processor'), ('sew-d', 'Wav2Vec2Processor'), ('siglip', 'SiglipProcessor'), ('speech_to_text', 'Speech2TextProcessor'), ('speech_to_text_2', 'Speech2Text2Processor'), ('speecht5', 'SpeechT5Processor'), ('trocr', 'TrOCRProcessor'), ('tvlt', 'TvltProcessor'), ('tvp', 'TvpProcessor'), ('unispeech', 
'Wav2Vec2Processor'), ('unispeech-sat', 'Wav2Vec2Processor'), ('video_llava', 'VideoLlavaProcessor'), ('vilt', 'ViltProcessor'), ('vipllava', 'LlavaProcessor'), ('vision-text-dual-encoder', 'VisionTextDualEncoderProcessor'), ('wav2vec2', 'Wav2Vec2Processor'), ('wav2vec2-bert', 'Wav2Vec2Processor'), ('wav2vec2-conformer', 'Wav2Vec2Processor'), ('wavlm', 'Wav2Vec2Processor'), ('whisper', 'WhisperProcessor'), ('xclip', 'XCLIPProcessor')]) PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, PROCESSOR_MAPPING_NAMES) def processor_class_from_name(class_name: str): for (module_name, processors) in PROCESSOR_MAPPING_NAMES.items(): if class_name in processors: module_name = model_type_to_module_name(module_name) module = importlib.import_module(f'.{module_name}', 'transformers.models') try: return getattr(module, class_name) except AttributeError: continue for processor in PROCESSOR_MAPPING._extra_content.values(): if getattr(processor, '__name__', None) == class_name: return processor main_module = importlib.import_module('transformers') if hasattr(main_module, class_name): return getattr(main_module, class_name) return None class AutoProcessor: def __init__(self): raise EnvironmentError('AutoProcessor is designed to be instantiated using the `AutoProcessor.from_pretrained(pretrained_model_name_or_path)` method.') @classmethod @replace_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES) def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): use_auth_token = kwargs.pop('use_auth_token', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if kwargs.get('token', None) is not None: raise ValueError('`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.') kwargs['token'] = use_auth_token config = kwargs.pop('config', None) trust_remote_code = kwargs.pop('trust_remote_code', None) kwargs['_from_auto'] = True processor_class = None processor_auto_map = None get_file_from_repo_kwargs = {key: kwargs[key] for key in inspect.signature(get_file_from_repo).parameters.keys() if key in kwargs} processor_config_file = get_file_from_repo(pretrained_model_name_or_path, PROCESSOR_NAME, **get_file_from_repo_kwargs) if processor_config_file is not None: (config_dict, _) = ProcessorMixin.get_processor_dict(pretrained_model_name_or_path, **kwargs) processor_class = config_dict.get('processor_class', None) if 'AutoProcessor' in config_dict.get('auto_map', {}): processor_auto_map = config_dict['auto_map']['AutoProcessor'] if processor_class is None: preprocessor_config_file = get_file_from_repo(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **get_file_from_repo_kwargs) if preprocessor_config_file is not None: (config_dict, _) = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs) processor_class = config_dict.get('processor_class', None) if 'AutoProcessor' in config_dict.get('auto_map', {}): processor_auto_map = config_dict['auto_map']['AutoProcessor'] if preprocessor_config_file is not None and processor_class is None: (config_dict, _) = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs) processor_class = config_dict.get('processor_class', None) if 'AutoProcessor' in config_dict.get('auto_map', {}): processor_auto_map = config_dict['auto_map']['AutoProcessor'] if processor_class is None: tokenizer_config_file = get_file_from_repo(pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, **get_file_from_repo_kwargs) if tokenizer_config_file is not None: with open(tokenizer_config_file, encoding='utf-8') as reader: config_dict = json.load(reader) processor_class = config_dict.get('processor_class', None) if 'AutoProcessor' in config_dict.get('auto_map', {}): processor_auto_map = config_dict['auto_map']['AutoProcessor'] if processor_class is None: if not isinstance(config, PretrainedConfig): config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs) processor_class = getattr(config, 'processor_class', None) if hasattr(config, 'auto_map') and 'AutoProcessor' in config.auto_map: processor_auto_map = config.auto_map['AutoProcessor'] if processor_class is not None: processor_class = processor_class_from_name(processor_class) has_remote_code = processor_auto_map is not None has_local_code = processor_class is not None or type(config) in PROCESSOR_MAPPING trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code) if has_remote_code and trust_remote_code: processor_class = get_class_from_dynamic_module(processor_auto_map, pretrained_model_name_or_path, **kwargs) _ = kwargs.pop('code_revision', None) if os.path.isdir(pretrained_model_name_or_path): processor_class.register_for_auto_class() return processor_class.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs) elif processor_class is not None: return processor_class.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs) elif type(config) in PROCESSOR_MAPPING: return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs) try: return 
AutoTokenizer.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs) except Exception: try: return AutoImageProcessor.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs) except Exception: pass try: return AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs) except Exception: pass raise ValueError(f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a tokenizer, an image processor or a feature extractor for this model. Make sure the repository contains the files of at least one of those processing classes.") @staticmethod def register(config_class, processor_class, exist_ok=False): PROCESSOR_MAPPING.register(config_class, processor_class, exist_ok=exist_ok) # File: transformers-main/src/transformers/models/auto/tokenization_auto.py """""" import importlib import json import os import warnings from collections import OrderedDict from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...modeling_gguf_pytorch_utils import load_gguf_checkpoint from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import TOKENIZER_CONFIG_FILE from ...utils import cached_file, extract_commit_hash, is_g2p_en_available, is_sentencepiece_available, is_tokenizers_available, logging from ..encoder_decoder import EncoderDecoderConfig from .auto_factory import _LazyAutoMapping from .configuration_auto import CONFIG_MAPPING_NAMES, AutoConfig, config_class_to_model_type, model_type_to_module_name, replace_list_option_in_docstrings if is_tokenizers_available(): from ...tokenization_utils_fast import PreTrainedTokenizerFast else: PreTrainedTokenizerFast = None logger = logging.get_logger(__name__) if TYPE_CHECKING: TOKENIZER_MAPPING_NAMES: OrderedDict[str, Tuple[Optional[str], Optional[str]]] = OrderedDict() else: TOKENIZER_MAPPING_NAMES = OrderedDict([('albert', ('AlbertTokenizer' if is_sentencepiece_available() else None, 'AlbertTokenizerFast' if is_tokenizers_available() else None)), ('align', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('bark', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('bart', ('BartTokenizer', 'BartTokenizerFast')), ('barthez', ('BarthezTokenizer' if is_sentencepiece_available() else None, 'BarthezTokenizerFast' if is_tokenizers_available() else None)), ('bartpho', ('BartphoTokenizer', None)), ('bert', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('bert-generation', ('BertGenerationTokenizer' if is_sentencepiece_available() else None, None)), ('bert-japanese', ('BertJapaneseTokenizer', None)), ('bertweet', ('BertweetTokenizer', None)), ('big_bird', ('BigBirdTokenizer' if is_sentencepiece_available() else None, 'BigBirdTokenizerFast' if is_tokenizers_available() else None)), ('bigbird_pegasus', ('PegasusTokenizer', 'PegasusTokenizerFast' if is_tokenizers_available() else None)), ('biogpt', ('BioGptTokenizer', None)), ('blenderbot', ('BlenderbotTokenizer', 'BlenderbotTokenizerFast')), ('blenderbot-small', ('BlenderbotSmallTokenizer', None)), ('blip', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('blip-2', ('GPT2Tokenizer', 'GPT2TokenizerFast' if is_tokenizers_available() else None)), 
('bloom', (None, 'BloomTokenizerFast' if is_tokenizers_available() else None)), ('bridgetower', ('RobertaTokenizer', 'RobertaTokenizerFast' if is_tokenizers_available() else None)), ('bros', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('byt5', ('ByT5Tokenizer', None)), ('camembert', ('CamembertTokenizer' if is_sentencepiece_available() else None, 'CamembertTokenizerFast' if is_tokenizers_available() else None)), ('canine', ('CanineTokenizer', None)), ('chameleon', ('LlamaTokenizer' if is_sentencepiece_available() else None, 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('chinese_clip', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('clap', ('RobertaTokenizer', 'RobertaTokenizerFast' if is_tokenizers_available() else None)), ('clip', ('CLIPTokenizer', 'CLIPTokenizerFast' if is_tokenizers_available() else None)), ('clipseg', ('CLIPTokenizer', 'CLIPTokenizerFast' if is_tokenizers_available() else None)), ('clvp', ('ClvpTokenizer', None)), ('code_llama', ('CodeLlamaTokenizer' if is_sentencepiece_available() else None, 'CodeLlamaTokenizerFast' if is_tokenizers_available() else None)), ('codegen', ('CodeGenTokenizer', 'CodeGenTokenizerFast' if is_tokenizers_available() else None)), ('cohere', (None, 'CohereTokenizerFast' if is_tokenizers_available() else None)), ('convbert', ('ConvBertTokenizer', 'ConvBertTokenizerFast' if is_tokenizers_available() else None)), ('cpm', ('CpmTokenizer' if is_sentencepiece_available() else None, 'CpmTokenizerFast' if is_tokenizers_available() else None)), ('cpmant', ('CpmAntTokenizer', None)), ('ctrl', ('CTRLTokenizer', None)), ('data2vec-audio', ('Wav2Vec2CTCTokenizer', None)), ('data2vec-text', ('RobertaTokenizer', 'RobertaTokenizerFast' if is_tokenizers_available() else None)), ('dbrx', ('GPT2Tokenizer', 'GPT2TokenizerFast' if is_tokenizers_available() else None)), ('deberta', ('DebertaTokenizer', 'DebertaTokenizerFast' if is_tokenizers_available() else None)), ('deberta-v2', ('DebertaV2Tokenizer' if is_sentencepiece_available() else None, 'DebertaV2TokenizerFast' if is_tokenizers_available() else None)), ('distilbert', ('DistilBertTokenizer', 'DistilBertTokenizerFast' if is_tokenizers_available() else None)), ('dpr', ('DPRQuestionEncoderTokenizer', 'DPRQuestionEncoderTokenizerFast' if is_tokenizers_available() else None)), ('electra', ('ElectraTokenizer', 'ElectraTokenizerFast' if is_tokenizers_available() else None)), ('ernie', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('ernie_m', ('ErnieMTokenizer' if is_sentencepiece_available() else None, None)), ('esm', ('EsmTokenizer', None)), ('falcon', (None, 'PreTrainedTokenizerFast' if is_tokenizers_available() else None)), ('falcon_mamba', (None, 'GPTNeoXTokenizerFast' if is_tokenizers_available() else None)), ('fastspeech2_conformer', ('FastSpeech2ConformerTokenizer' if is_g2p_en_available() else None, None)), ('flaubert', ('FlaubertTokenizer', None)), ('fnet', ('FNetTokenizer', 'FNetTokenizerFast' if is_tokenizers_available() else None)), ('fsmt', ('FSMTTokenizer', None)), ('funnel', ('FunnelTokenizer', 'FunnelTokenizerFast' if is_tokenizers_available() else None)), ('gemma', ('GemmaTokenizer' if is_sentencepiece_available() else None, 'GemmaTokenizerFast' if is_tokenizers_available() else None)), ('gemma2', ('GemmaTokenizer' if is_sentencepiece_available() else None, 'GemmaTokenizerFast' if is_tokenizers_available() else None)), ('git', ('BertTokenizer', 'BertTokenizerFast' if 
is_tokenizers_available() else None)), ('gpt-sw3', ('GPTSw3Tokenizer' if is_sentencepiece_available() else None, None)), ('gpt2', ('GPT2Tokenizer', 'GPT2TokenizerFast' if is_tokenizers_available() else None)), ('gpt_bigcode', ('GPT2Tokenizer', 'GPT2TokenizerFast' if is_tokenizers_available() else None)), ('gpt_neo', ('GPT2Tokenizer', 'GPT2TokenizerFast' if is_tokenizers_available() else None)), ('gpt_neox', (None, 'GPTNeoXTokenizerFast' if is_tokenizers_available() else None)), ('gpt_neox_japanese', ('GPTNeoXJapaneseTokenizer', None)), ('gptj', ('GPT2Tokenizer', 'GPT2TokenizerFast' if is_tokenizers_available() else None)), ('gptsan-japanese', ('GPTSanJapaneseTokenizer', None)), ('grounding-dino', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('groupvit', ('CLIPTokenizer', 'CLIPTokenizerFast' if is_tokenizers_available() else None)), ('herbert', ('HerbertTokenizer', 'HerbertTokenizerFast' if is_tokenizers_available() else None)), ('hubert', ('Wav2Vec2CTCTokenizer', None)), ('ibert', ('RobertaTokenizer', 'RobertaTokenizerFast' if is_tokenizers_available() else None)), ('idefics', (None, 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('idefics2', ('LlamaTokenizer', 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('instructblip', ('GPT2Tokenizer', 'GPT2TokenizerFast' if is_tokenizers_available() else None)), ('instructblipvideo', ('GPT2Tokenizer', 'GPT2TokenizerFast' if is_tokenizers_available() else None)), ('jamba', ('LlamaTokenizer' if is_sentencepiece_available() else None, 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('jetmoe', ('LlamaTokenizer' if is_sentencepiece_available() else None, 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('jukebox', ('JukeboxTokenizer', None)), ('kosmos-2', ('XLMRobertaTokenizer' if is_sentencepiece_available() else None, 'XLMRobertaTokenizerFast' if is_tokenizers_available() else None)), ('layoutlm', ('LayoutLMTokenizer', 'LayoutLMTokenizerFast' if is_tokenizers_available() else None)), ('layoutlmv2', ('LayoutLMv2Tokenizer', 'LayoutLMv2TokenizerFast' if is_tokenizers_available() else None)), ('layoutlmv3', ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast' if is_tokenizers_available() else None)), ('layoutxlm', ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast' if is_tokenizers_available() else None)), ('led', ('LEDTokenizer', 'LEDTokenizerFast' if is_tokenizers_available() else None)), ('lilt', ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast' if is_tokenizers_available() else None)), ('llama', ('LlamaTokenizer' if is_sentencepiece_available() else None, 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('llava', ('LlamaTokenizer', 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('llava-onevision', ('LlamaTokenizer', 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('llava_next', ('LlamaTokenizer', 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('llava_next_video', ('LlamaTokenizer', 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('longformer', ('LongformerTokenizer', 'LongformerTokenizerFast' if is_tokenizers_available() else None)), ('longt5', ('T5Tokenizer' if is_sentencepiece_available() else None, 'T5TokenizerFast' if is_tokenizers_available() else None)), ('luke', ('LukeTokenizer', None)), ('lxmert', ('LxmertTokenizer', 'LxmertTokenizerFast' if is_tokenizers_available() else None)), ('m2m_100', ('M2M100Tokenizer' if is_sentencepiece_available() else None, None)), ('mamba', (None, 
'GPTNeoXTokenizerFast' if is_tokenizers_available() else None)), ('mamba2', (None, 'GPTNeoXTokenizerFast' if is_tokenizers_available() else None)), ('marian', ('MarianTokenizer' if is_sentencepiece_available() else None, None)), ('mbart', ('MBartTokenizer' if is_sentencepiece_available() else None, 'MBartTokenizerFast' if is_tokenizers_available() else None)), ('mbart50', ('MBart50Tokenizer' if is_sentencepiece_available() else None, 'MBart50TokenizerFast' if is_tokenizers_available() else None)), ('mega', ('RobertaTokenizer', 'RobertaTokenizerFast' if is_tokenizers_available() else None)), ('megatron-bert', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('mgp-str', ('MgpstrTokenizer', None)), ('mistral', ('LlamaTokenizer' if is_sentencepiece_available() else None, 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('mixtral', ('LlamaTokenizer' if is_sentencepiece_available() else None, 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('mluke', ('MLukeTokenizer' if is_sentencepiece_available() else None, None)), ('mobilebert', ('MobileBertTokenizer', 'MobileBertTokenizerFast' if is_tokenizers_available() else None)), ('mpnet', ('MPNetTokenizer', 'MPNetTokenizerFast' if is_tokenizers_available() else None)), ('mpt', (None, 'GPTNeoXTokenizerFast' if is_tokenizers_available() else None)), ('mra', ('RobertaTokenizer', 'RobertaTokenizerFast' if is_tokenizers_available() else None)), ('mt5', ('MT5Tokenizer' if is_sentencepiece_available() else None, 'MT5TokenizerFast' if is_tokenizers_available() else None)), ('musicgen', ('T5Tokenizer', 'T5TokenizerFast' if is_tokenizers_available() else None)), ('musicgen_melody', ('T5Tokenizer', 'T5TokenizerFast' if is_tokenizers_available() else None)), ('mvp', ('MvpTokenizer', 'MvpTokenizerFast' if is_tokenizers_available() else None)), ('nezha', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('nllb', ('NllbTokenizer' if is_sentencepiece_available() else None, 'NllbTokenizerFast' if is_tokenizers_available() else None)), ('nllb-moe', ('NllbTokenizer' if is_sentencepiece_available() else None, 'NllbTokenizerFast' if is_tokenizers_available() else None)), ('nystromformer', ('AlbertTokenizer' if is_sentencepiece_available() else None, 'AlbertTokenizerFast' if is_tokenizers_available() else None)), ('olmo', (None, 'GPTNeoXTokenizerFast' if is_tokenizers_available() else None)), ('olmoe', (None, 'GPTNeoXTokenizerFast' if is_tokenizers_available() else None)), ('oneformer', ('CLIPTokenizer', 'CLIPTokenizerFast' if is_tokenizers_available() else None)), ('openai-gpt', ('OpenAIGPTTokenizer', 'OpenAIGPTTokenizerFast' if is_tokenizers_available() else None)), ('opt', ('GPT2Tokenizer', 'GPT2TokenizerFast' if is_tokenizers_available() else None)), ('owlv2', ('CLIPTokenizer', 'CLIPTokenizerFast' if is_tokenizers_available() else None)), ('owlvit', ('CLIPTokenizer', 'CLIPTokenizerFast' if is_tokenizers_available() else None)), ('paligemma', ('LlamaTokenizer', 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('pegasus', ('PegasusTokenizer' if is_sentencepiece_available() else None, 'PegasusTokenizerFast' if is_tokenizers_available() else None)), ('pegasus_x', ('PegasusTokenizer' if is_sentencepiece_available() else None, 'PegasusTokenizerFast' if is_tokenizers_available() else None)), ('perceiver', ('PerceiverTokenizer', None)), ('persimmon', ('LlamaTokenizer' if is_sentencepiece_available() else None, 'LlamaTokenizerFast' if is_tokenizers_available() else None)), 
('phi', ('CodeGenTokenizer', 'CodeGenTokenizerFast' if is_tokenizers_available() else None)), ('phi3', ('LlamaTokenizer', 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('phobert', ('PhobertTokenizer', None)), ('pix2struct', ('T5Tokenizer', 'T5TokenizerFast' if is_tokenizers_available() else None)), ('pixtral', (None, 'PreTrainedTokenizerFast' if is_tokenizers_available() else None)), ('plbart', ('PLBartTokenizer' if is_sentencepiece_available() else None, None)), ('prophetnet', ('ProphetNetTokenizer', None)), ('qdqbert', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('qwen2', ('Qwen2Tokenizer', 'Qwen2TokenizerFast' if is_tokenizers_available() else None)), ('qwen2_audio', ('Qwen2Tokenizer', 'Qwen2TokenizerFast' if is_tokenizers_available() else None)), ('qwen2_moe', ('Qwen2Tokenizer', 'Qwen2TokenizerFast' if is_tokenizers_available() else None)), ('rag', ('RagTokenizer', None)), ('realm', ('RealmTokenizer', 'RealmTokenizerFast' if is_tokenizers_available() else None)), ('recurrent_gemma', ('GemmaTokenizer' if is_sentencepiece_available() else None, 'GemmaTokenizerFast' if is_tokenizers_available() else None)), ('reformer', ('ReformerTokenizer' if is_sentencepiece_available() else None, 'ReformerTokenizerFast' if is_tokenizers_available() else None)), ('rembert', ('RemBertTokenizer' if is_sentencepiece_available() else None, 'RemBertTokenizerFast' if is_tokenizers_available() else None)), ('retribert', ('RetriBertTokenizer', 'RetriBertTokenizerFast' if is_tokenizers_available() else None)), ('roberta', ('RobertaTokenizer', 'RobertaTokenizerFast' if is_tokenizers_available() else None)), ('roberta-prelayernorm', ('RobertaTokenizer', 'RobertaTokenizerFast' if is_tokenizers_available() else None)), ('roc_bert', ('RoCBertTokenizer', None)), ('roformer', ('RoFormerTokenizer', 'RoFormerTokenizerFast' if is_tokenizers_available() else None)), ('rwkv', (None, 'GPTNeoXTokenizerFast' if is_tokenizers_available() else None)), ('seamless_m4t', ('SeamlessM4TTokenizer' if is_sentencepiece_available() else None, 'SeamlessM4TTokenizerFast' if is_tokenizers_available() else None)), ('seamless_m4t_v2', ('SeamlessM4TTokenizer' if is_sentencepiece_available() else None, 'SeamlessM4TTokenizerFast' if is_tokenizers_available() else None)), ('siglip', ('SiglipTokenizer' if is_sentencepiece_available() else None, None)), ('speech_to_text', ('Speech2TextTokenizer' if is_sentencepiece_available() else None, None)), ('speech_to_text_2', ('Speech2Text2Tokenizer', None)), ('speecht5', ('SpeechT5Tokenizer' if is_sentencepiece_available() else None, None)), ('splinter', ('SplinterTokenizer', 'SplinterTokenizerFast')), ('squeezebert', ('SqueezeBertTokenizer', 'SqueezeBertTokenizerFast' if is_tokenizers_available() else None)), ('stablelm', (None, 'GPTNeoXTokenizerFast' if is_tokenizers_available() else None)), ('starcoder2', ('GPT2Tokenizer', 'GPT2TokenizerFast' if is_tokenizers_available() else None)), ('switch_transformers', ('T5Tokenizer' if is_sentencepiece_available() else None, 'T5TokenizerFast' if is_tokenizers_available() else None)), ('t5', ('T5Tokenizer' if is_sentencepiece_available() else None, 'T5TokenizerFast' if is_tokenizers_available() else None)), ('tapas', ('TapasTokenizer', None)), ('tapex', ('TapexTokenizer', None)), ('transfo-xl', ('TransfoXLTokenizer', None)), ('tvp', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('udop', ('UdopTokenizer' if is_sentencepiece_available() else None, 'UdopTokenizerFast' if 
is_tokenizers_available() else None)), ('umt5', ('T5Tokenizer' if is_sentencepiece_available() else None, 'T5TokenizerFast' if is_tokenizers_available() else None)), ('video_llava', ('LlamaTokenizer', 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('vilt', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('vipllava', ('LlamaTokenizer', 'LlamaTokenizerFast' if is_tokenizers_available() else None)), ('visual_bert', ('BertTokenizer', 'BertTokenizerFast' if is_tokenizers_available() else None)), ('vits', ('VitsTokenizer', None)), ('wav2vec2', ('Wav2Vec2CTCTokenizer', None)), ('wav2vec2-bert', ('Wav2Vec2CTCTokenizer', None)), ('wav2vec2-conformer', ('Wav2Vec2CTCTokenizer', None)), ('wav2vec2_phoneme', ('Wav2Vec2PhonemeCTCTokenizer', None)), ('whisper', ('WhisperTokenizer', 'WhisperTokenizerFast' if is_tokenizers_available() else None)), ('xclip', ('CLIPTokenizer', 'CLIPTokenizerFast' if is_tokenizers_available() else None)), ('xglm', ('XGLMTokenizer' if is_sentencepiece_available() else None, 'XGLMTokenizerFast' if is_tokenizers_available() else None)), ('xlm', ('XLMTokenizer', None)), ('xlm-prophetnet', ('XLMProphetNetTokenizer' if is_sentencepiece_available() else None, None)), ('xlm-roberta', ('XLMRobertaTokenizer' if is_sentencepiece_available() else None, 'XLMRobertaTokenizerFast' if is_tokenizers_available() else None)), ('xlm-roberta-xl', ('XLMRobertaTokenizer' if is_sentencepiece_available() else None, 'XLMRobertaTokenizerFast' if is_tokenizers_available() else None)), ('xlnet', ('XLNetTokenizer' if is_sentencepiece_available() else None, 'XLNetTokenizerFast' if is_tokenizers_available() else None)), ('xmod', ('XLMRobertaTokenizer' if is_sentencepiece_available() else None, 'XLMRobertaTokenizerFast' if is_tokenizers_available() else None)), ('yoso', ('AlbertTokenizer' if is_sentencepiece_available() else None, 'AlbertTokenizerFast' if is_tokenizers_available() else None))]) TOKENIZER_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TOKENIZER_MAPPING_NAMES) CONFIG_TO_TYPE = {v: k for (k, v) in CONFIG_MAPPING_NAMES.items()} def tokenizer_class_from_name(class_name: str): if class_name == 'PreTrainedTokenizerFast': return PreTrainedTokenizerFast for (module_name, tokenizers) in TOKENIZER_MAPPING_NAMES.items(): if class_name in tokenizers: module_name = model_type_to_module_name(module_name) module = importlib.import_module(f'.{module_name}', 'transformers.models') try: return getattr(module, class_name) except AttributeError: continue for (config, tokenizers) in TOKENIZER_MAPPING._extra_content.items(): for tokenizer in tokenizers: if getattr(tokenizer, '__name__', None) == class_name: return tokenizer main_module = importlib.import_module('transformers') if hasattr(main_module, class_name): return getattr(main_module, class_name) return None def get_tokenizer_config(pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]]=None, force_download: bool=False, resume_download: Optional[bool]=None, proxies: Optional[Dict[str, str]]=None, token: Optional[Union[bool, str]]=None, revision: Optional[str]=None, local_files_only: bool=False, subfolder: str='', **kwargs): use_auth_token = kwargs.pop('use_auth_token', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if token is not None: raise ValueError('`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.') token = use_auth_token commit_hash = kwargs.get('_commit_hash', None) resolved_config_file = cached_file(pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, subfolder=subfolder, _raise_exceptions_for_gated_repo=False, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, _commit_hash=commit_hash) if resolved_config_file is None: logger.info('Could not locate the tokenizer configuration file, will try to use the model config instead.') return {} commit_hash = extract_commit_hash(resolved_config_file, commit_hash) with open(resolved_config_file, encoding='utf-8') as reader: result = json.load(reader) result['_commit_hash'] = commit_hash return result class AutoTokenizer: def __init__(self): raise EnvironmentError('AutoTokenizer is designed to be instantiated using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method.') @classmethod @replace_list_option_in_docstrings(TOKENIZER_MAPPING_NAMES) def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): use_auth_token = kwargs.pop('use_auth_token', None) if use_auth_token is not None: warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning) if kwargs.get('token', None) is not None: raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.') kwargs['token'] = use_auth_token config = kwargs.pop('config', None) kwargs['_from_auto'] = True use_fast = kwargs.pop('use_fast', True) tokenizer_type = kwargs.pop('tokenizer_type', None) trust_remote_code = kwargs.pop('trust_remote_code', None) gguf_file = kwargs.get('gguf_file', None) if tokenizer_type is not None: tokenizer_class = None tokenizer_class_tuple = TOKENIZER_MAPPING_NAMES.get(tokenizer_type, None) if tokenizer_class_tuple is None: raise ValueError(f"Passed `tokenizer_type` {tokenizer_type} does not exist. `tokenizer_type` should be one of {', '.join((c for c in TOKENIZER_MAPPING_NAMES.keys()))}.") (tokenizer_class_name, tokenizer_fast_class_name) = tokenizer_class_tuple if use_fast: if tokenizer_fast_class_name is not None: tokenizer_class = tokenizer_class_from_name(tokenizer_fast_class_name) else: logger.warning('`use_fast` is set to `True` but the tokenizer class does not have a fast version. 
Falling back to the slow version.') if tokenizer_class is None: tokenizer_class = tokenizer_class_from_name(tokenizer_class_name) if tokenizer_class is None: raise ValueError(f'Tokenizer class {tokenizer_class_name} is not currently imported.') return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs) if '_commit_hash' in tokenizer_config: kwargs['_commit_hash'] = tokenizer_config['_commit_hash'] config_tokenizer_class = tokenizer_config.get('tokenizer_class') tokenizer_auto_map = None if 'auto_map' in tokenizer_config: if isinstance(tokenizer_config['auto_map'], (tuple, list)): tokenizer_auto_map = tokenizer_config['auto_map'] else: tokenizer_auto_map = tokenizer_config['auto_map'].get('AutoTokenizer', None) if config_tokenizer_class is None: if not isinstance(config, PretrainedConfig): if gguf_file: gguf_path = cached_file(pretrained_model_name_or_path, gguf_file, **kwargs) config_dict = load_gguf_checkpoint(gguf_path, return_tensors=False)['config'] config = AutoConfig.for_model(**config_dict) else: config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs) config_tokenizer_class = config.tokenizer_class if hasattr(config, 'auto_map') and 'AutoTokenizer' in config.auto_map: tokenizer_auto_map = config.auto_map['AutoTokenizer'] has_remote_code = tokenizer_auto_map is not None has_local_code = type(config) in TOKENIZER_MAPPING or (config_tokenizer_class is not None and (tokenizer_class_from_name(config_tokenizer_class) is not None or tokenizer_class_from_name(config_tokenizer_class + 'Fast') is not None)) trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code) if has_remote_code and trust_remote_code: if use_fast and tokenizer_auto_map[1] is not None: class_ref = tokenizer_auto_map[1] else: class_ref = tokenizer_auto_map[0] tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs) _ = kwargs.pop('code_revision', None) if os.path.isdir(pretrained_model_name_or_path): tokenizer_class.register_for_auto_class() return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, trust_remote_code=trust_remote_code, **kwargs) elif config_tokenizer_class is not None: tokenizer_class = None if use_fast and (not config_tokenizer_class.endswith('Fast')): tokenizer_class_candidate = f'{config_tokenizer_class}Fast' tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate) if tokenizer_class is None: tokenizer_class_candidate = config_tokenizer_class tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate) if tokenizer_class is None: raise ValueError(f'Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported.') return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) if isinstance(config, EncoderDecoderConfig): if type(config.decoder) is not type(config.encoder): logger.warning(f'The encoder model config class: {config.encoder.__class__} is different from the decoder model config class: {config.decoder.__class__}. It is not recommended to use the `AutoTokenizer.from_pretrained()` method in this case. 
Please use the encoder and decoder specific tokenizer classes.') config = config.encoder model_type = config_class_to_model_type(type(config).__name__) if model_type is not None: (tokenizer_class_py, tokenizer_class_fast) = TOKENIZER_MAPPING[type(config)] if tokenizer_class_fast and (use_fast or tokenizer_class_py is None): return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) elif tokenizer_class_py is not None: return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) else: raise ValueError('This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed in order to use this tokenizer.') raise ValueError(f"Unrecognized configuration class {config.__class__} to build an AutoTokenizer.\nModel type should be one of {', '.join((c.__name__ for c in TOKENIZER_MAPPING.keys()))}.") def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None, exist_ok=False): if slow_tokenizer_class is None and fast_tokenizer_class is None: raise ValueError('You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class') if slow_tokenizer_class is not None and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast): raise ValueError('You passed a fast tokenizer in the `slow_tokenizer_class`.') if fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizer): raise ValueError('You passed a slow tokenizer in the `fast_tokenizer_class`.') if slow_tokenizer_class is not None and fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast) and (fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class): raise ValueError(f'The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not consistent with the slow tokenizer class you passed (fast tokenizer has {fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}. 
Fix one of those so they match!') if config_class in TOKENIZER_MAPPING._extra_content: (existing_slow, existing_fast) = TOKENIZER_MAPPING[config_class] if slow_tokenizer_class is None: slow_tokenizer_class = existing_slow if fast_tokenizer_class is None: fast_tokenizer_class = existing_fast TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class), exist_ok=exist_ok) # File: transformers-main/src/transformers/models/autoformer/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_autoformer': ['AutoformerConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_autoformer'] = ['AutoformerForPrediction', 'AutoformerModel', 'AutoformerPreTrainedModel'] if TYPE_CHECKING: from .configuration_autoformer import AutoformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/autoformer/configuration_autoformer.py """""" from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class AutoformerConfig(PretrainedConfig): model_type = 'autoformer' attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers'} def __init__(self, prediction_length: Optional[int]=None, context_length: Optional[int]=None, distribution_output: str='student_t', loss: str='nll', input_size: int=1, lags_sequence: List[int]=[1, 2, 3, 4, 5, 6, 7], scaling: bool=True, num_time_features: int=0, num_dynamic_real_features: int=0, num_static_categorical_features: int=0, num_static_real_features: int=0, cardinality: Optional[List[int]]=None, embedding_dimension: Optional[List[int]]=None, d_model: int=64, encoder_attention_heads: int=2, decoder_attention_heads: int=2, encoder_layers: int=2, decoder_layers: int=2, encoder_ffn_dim: int=32, decoder_ffn_dim: int=32, activation_function: str='gelu', dropout: float=0.1, encoder_layerdrop: float=0.1, decoder_layerdrop: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, num_parallel_samples: int=100, init_std: float=0.02, use_cache: bool=True, is_encoder_decoder=True, label_length: int=10, moving_average: int=25, autocorrelation_factor: int=3, **kwargs): self.prediction_length = prediction_length self.context_length = context_length if context_length is not None else prediction_length self.distribution_output = distribution_output self.loss = loss self.input_size = input_size self.num_time_features = num_time_features self.lags_sequence = lags_sequence self.scaling = scaling self.num_dynamic_real_features = num_dynamic_real_features self.num_static_real_features = num_static_real_features self.num_static_categorical_features = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(cardinality) != num_static_categorical_features: raise ValueError('The cardinality should be a list of the same length as `num_static_categorical_features`') self.cardinality = cardinality else: 
self.cardinality = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(embedding_dimension) != num_static_categorical_features: raise ValueError('The embedding dimension should be a list of the same length as `num_static_categorical_features`') self.embedding_dimension = embedding_dimension else: self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality] self.num_parallel_samples = num_parallel_samples self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features self.d_model = d_model self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.encoder_ffn_dim = encoder_ffn_dim self.decoder_ffn_dim = decoder_ffn_dim self.encoder_layers = encoder_layers self.decoder_layers = decoder_layers self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.activation_function = activation_function self.init_std = init_std self.use_cache = use_cache self.label_length = label_length self.moving_average = moving_average self.autocorrelation_factor = autocorrelation_factor super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def _number_of_features(self) -> int: return sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # File: transformers-main/src/transformers/models/autoformer/modeling_autoformer.py """""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, ModelOutput, SampleTSPredictionOutput, Seq2SeqTSPredictionOutput from ...modeling_utils import PreTrainedModel from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_autoformer import AutoformerConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'AutoformerConfig' @dataclass class AutoFormerDecoderOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None trend: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class AutoformerModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None trend: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None static_features: Optional[torch.FloatTensor] = None class AutoformerFeatureEmbedder(nn.Module): def __init__(self, cardinalities: List[int], embedding_dims: 
List[int]) -> None: super().__init__() self.num_features = len(cardinalities) self.embedders = nn.ModuleList([nn.Embedding(c, d) for (c, d) in zip(cardinalities, embedding_dims)]) def forward(self, features: torch.Tensor) -> torch.Tensor: if self.num_features > 1: cat_feature_slices = torch.chunk(features, self.num_features, dim=-1) else: cat_feature_slices = [features] return torch.cat([embed(cat_feature_slice.squeeze(-1)) for (embed, cat_feature_slice) in zip(self.embedders, cat_feature_slices)], dim=-1) class AutoformerStdScaler(nn.Module): def __init__(self, config: AutoformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1 self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-05 def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return ((data - loc) / scale, loc, scale) class AutoformerMeanScaler(nn.Module): def __init__(self, config: AutoformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1 self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-10 self.default_scale = config.default_scale if hasattr(config, 'default_scale') else None def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) scale = ts_sum / torch.clamp(num_observed, min=1) if self.default_scale is None: batch_sum = ts_sum.sum(dim=0) batch_observations = torch.clamp(num_observed.sum(0), min=1) default_scale = torch.squeeze(batch_sum / batch_observations) else: default_scale = self.default_scale * torch.ones_like(scale) scale = torch.where(num_observed > 0, scale, default_scale) scale = torch.clamp(scale, min=self.minimum_scale) scaled_data = data / scale if not self.keepdim: scale = scale.squeeze(dim=self.dim) return (scaled_data, torch.zeros_like(scale), scale) class AutoformerNOPScaler(nn.Module): def __init__(self, config: AutoformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1 self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor=None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return (data, loc, scale) def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor]=None, dim=None) -> torch.Tensor: if weights is not None: weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) return (weighted_tensor.sum(dim=dim) if dim else 
weighted_tensor.sum()) / sum_weights else: return input_tensor.mean(dim=dim) def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: return -input.log_prob(target) class AutoformerSinusoidalPositionalEmbedding(nn.Embedding): def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None: super().__init__(num_positions, embedding_dim) self.weight = self._init_weight(self.weight) @staticmethod def _init_weight(out: nn.Parameter) -> nn.Parameter: (n_pos, dim) = out.shape position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out.requires_grad = False sentinel = dim // 2 if dim % 2 == 0 else dim // 2 + 1 out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() return out @torch.no_grad() def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0) -> torch.Tensor: (bsz, seq_len) = input_ids_shape[:2] positions = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device) return super().forward(positions) class AutoformerValueEmbedding(nn.Module): def __init__(self, feature_size, d_model): super().__init__() self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False) def forward(self, x): return self.value_projection(x) class AutoformerSeriesDecompositionLayer(nn.Module): def __init__(self, config: AutoformerConfig): super().__init__() self.kernel_size = config.moving_average self.avg = nn.AvgPool1d(kernel_size=self.kernel_size, stride=1, padding=0) def forward(self, x): num_of_pads = (self.kernel_size - 1) // 2 front = x[:, 0:1, :].repeat(1, num_of_pads, 1) end = x[:, -1:, :].repeat(1, num_of_pads, 1) x_padded = torch.cat([front, x, end], dim=1) x_trend = self.avg(x_padded.permute(0, 2, 1)).permute(0, 2, 1) x_seasonal = x - x_trend return (x_seasonal, x_trend) class AutoformerLayernorm(nn.Module): def __init__(self, config: AutoformerConfig): super().__init__() self.layernorm = nn.LayerNorm(config.d_model) def forward(self, x): x_hat = self.layernorm(x) bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1) return x_hat - bias class AutoformerAttention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, autocorrelation_factor: int=3): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.autocorrelation_factor = autocorrelation_factor def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, 
output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) queries_time_length = query_states.size(1) values_time_length = value_states.size(1) if queries_time_length > values_time_length: query_states = query_states[:, :queries_time_length - values_time_length, :] zeros = torch.zeros_like(query_states).float() value_states = torch.cat([value_states, zeros], dim=1) key_states = torch.cat([key_states, zeros], dim=1) else: value_states = value_states[:, :queries_time_length, :] key_states = key_states[:, :queries_time_length, :] query_states_fft = torch.fft.rfft(query_states, n=tgt_len, dim=1) key_states_fft = torch.fft.rfft(key_states, n=tgt_len, dim=1) attn_weights = query_states_fft * torch.conj(key_states_fft) attn_weights = torch.fft.irfft(attn_weights, n=tgt_len, dim=1) src_len = key_states.size(1) channel = key_states.size(2) if attn_weights.size() != (bsz * self.num_heads, tgt_len, channel): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, channel)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, channel) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, channel) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, channel) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, channel) else: attn_weights_reshaped = None time_length = value_states.size(1) autocorrelations = attn_weights.view(bsz, self.num_heads, tgt_len, channel) top_k = int(self.autocorrelation_factor * math.log(time_length)) autocorrelations_mean_on_head_channel = torch.mean(autocorrelations, dim=(1, -1)) if self.training: autocorrelations_mean_on_bsz = 
torch.mean(autocorrelations_mean_on_head_channel, dim=0) (_, top_k_delays_index) = torch.topk(autocorrelations_mean_on_bsz, top_k) top_k_autocorrelations = torch.stack([autocorrelations_mean_on_head_channel[:, top_k_delays_index[i]] for i in range(top_k)], dim=-1) else: (top_k_autocorrelations, top_k_delays_index) = torch.topk(autocorrelations_mean_on_head_channel, top_k, dim=1) top_k_autocorrelations = torch.softmax(top_k_autocorrelations, dim=-1) if not self.training: tmp_values = value_states.repeat(1, 2, 1) init_index = torch.arange(time_length).view(1, -1, 1).repeat(bsz * self.num_heads, 1, channel).to(value_states.device) delays_agg = torch.zeros_like(value_states).float() for i in range(top_k): if not self.training: tmp_delay = init_index + top_k_delays_index[:, i].view(-1, 1, 1).repeat(self.num_heads, tgt_len, channel) value_states_roll_delay = torch.gather(tmp_values, dim=1, index=tmp_delay) else: value_states_roll_delay = value_states.roll(shifts=-int(top_k_delays_index[i]), dims=1) top_k_autocorrelations_at_delay = top_k_autocorrelations[:, i].view(-1, 1, 1).repeat(self.num_heads, tgt_len, channel) delays_agg += value_states_roll_delay * top_k_autocorrelations_at_delay attn_output = delays_agg.contiguous() if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) class AutoformerEncoderLayer(nn.Module): def __init__(self, config: AutoformerConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = AutoformerAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, autocorrelation_factor=config.autocorrelation_factor) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = AutoformerLayernorm(config) self.decomp1 = AutoformerSeriesDecompositionLayer(config) self.decomp2 = AutoformerSeriesDecompositionLayer(config) def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: residual = hidden_states (hidden_states, attn_weights, _) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) (hidden_states, _) = self.decomp1(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual 
+ hidden_states (hidden_states, _) = self.decomp2(hidden_states) hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class AutoformerDecoderLayer(nn.Module): def __init__(self, config: AutoformerConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = AutoformerAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, autocorrelation_factor=config.autocorrelation_factor) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = AutoformerAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, autocorrelation_factor=config.autocorrelation_factor) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = AutoformerLayernorm(config) self.decomp1 = AutoformerSeriesDecompositionLayer(config) self.decomp2 = AutoformerSeriesDecompositionLayer(config) self.decomp3 = AutoformerSeriesDecompositionLayer(config) self.trend_projection = nn.Conv1d(in_channels=self.embed_dim, out_channels=config.feature_size, kernel_size=3, stride=1, padding=1, padding_mode='circular', bias=False) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states (hidden_states, trend1) = self.decomp1(hidden_states) hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None (hidden_states, cross_attn_weights, cross_attn_present_key_value) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states 
(hidden_states, trend2) = self.decomp2(hidden_states) hidden_states = self.encoder_attn_layer_norm(hidden_states) present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states (hidden_states, trend3) = self.decomp3(hidden_states) hidden_states = self.final_layer_norm(hidden_states) if encoder_hidden_states is not None: residual_trend = trend1 + trend2 + trend3 else: residual_trend = trend1 + trend3 residual_trend = self.trend_projection(residual_trend.permute(0, 2, 1)).transpose(1, 2) outputs = ((hidden_states, residual_trend),) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class AutoformerPreTrainedModel(PreTrainedModel): config_class = AutoformerConfig base_model_prefix = 'model' main_input_name = 'past_values' supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, AutoformerSinusoidalPositionalEmbedding): pass elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() AUTOFORMER_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`AutoformerConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' AUTOFORMER_INPUTS_DOCSTRING = '\n Args:\n past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Past values of the time series, that serve as context in order to predict the future. These values may\n contain lags, i.e. additional values from the past which are added in order to serve as "extra context".\n The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as\n `static_categorical_features`, `static_real_features`, `past_time_features`).\n\n The sequence length here is equal to `context_length` + `max(config.lags_sequence)`.\n\n Missing values need to be replaced with zeros.\n\n past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`, *optional*):\n Optional time features, which the model internally will add to `past_values`. These could be things like\n "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). 
These\n could also be so-called "age" features, which basically help the model know "at which point in life" a\n time-series is. Age features have small values for distant past time steps and increase monotonically the\n more we approach the current time step.\n\n These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where\n the position encodings are learned from scratch internally as parameters of the model, the Time Series\n Transformer requires to provide additional time features.\n\n The Autoformer only learns additional embeddings for `static_categorical_features`.\n\n past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in\n `[0, 1]`:\n\n - 1 for values that are **observed**,\n - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).\n\n static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):\n Optional static categorical features for which the model will learn an embedding, which it will add to the\n values of the time series.\n\n Static categorical features are features which have the same value for all time steps (static over time).\n\n A typical example of a static categorical feature is a time series ID.\n\n static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):\n Optional static real features which the model will add to the values of the time series.\n\n Static real features are features which have the same value for all time steps (static over time).\n\n A typical example of a static real feature is promotion information.\n\n future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)`):\n Future values of the time series, that serve as labels for the model. The `future_values` is what the\n Transformer needs to learn to output, given the `past_values`.\n\n See the demo notebook and code snippets for details.\n\n Missing values need to be replaced with zeros.\n\n future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`, *optional*):\n Optional time features, which the model internally will add to `future_values`. These could be things like\n "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These\n could also be so-called "age" features, which basically help the model know "at which point in life" a\n time-series is. Age features have small values for distant past time steps and increase monotonically the\n more we approach the current time step.\n\n These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where\n the position encodings are learned from scratch internally as parameters of the model, the Time Series\n Transformer requires to provide additional features.\n\n The Autoformer only learns additional embeddings for `static_categorical_features`.\n\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on certain token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Mask to avoid performing attention on certain token indices. By default, a causal mask will be used, to\n make sure the model can only look at previous inputs in order to predict the future.\n\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don\'t have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model\'s internal embedding lookup matrix.\n\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class AutoformerEncoder(AutoformerPreTrainedModel): def __init__(self, config: AutoformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop if config.prediction_length is None: raise ValueError('The `prediction_length` config needs to be specified.') self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model) self.embed_positions = AutoformerSinusoidalPositionalEmbedding(config.context_length + config.prediction_length, config.d_model) self.layers = nn.ModuleList([AutoformerEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def forward(self, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions(inputs_embeds.size()) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class AutoformerDecoder(AutoformerPreTrainedModel): def __init__(self, config: AutoformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop if config.prediction_length is 
None: raise ValueError('The `prediction_length` config needs to be specified.') self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model) self.embed_positions = AutoformerSinusoidalPositionalEmbedding(config.context_length + config.prediction_length, config.d_model) self.layers = nn.ModuleList([AutoformerDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.seasonality_projection = nn.Linear(config.d_model, config.feature_size) self.gradient_checkpointing = False self.post_init() def forward(self, trend: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, AutoFormerDecoderOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_shape = inputs_embeds.size()[:-1] if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length - self.config.label_length) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None next_decoder_cache = () if use_cache else None for (attn_mask, mask_name) in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}.') for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache) else: layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) (hidden_states, residual_trend) = layer_outputs[0] trend = trend + residual_trend if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.seasonality_projection(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, trend, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return AutoFormerDecoderOutput(last_hidden_state=hidden_states, trend=trend, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) @add_start_docstrings('The bare Autoformer Model outputting raw hidden-states without any specific head on top.', AUTOFORMER_START_DOCSTRING) class AutoformerModel(AutoformerPreTrainedModel): def __init__(self, config: AutoformerConfig): super().__init__(config) if config.scaling == 'mean' or config.scaling is True: self.scaler = AutoformerMeanScaler(config) elif config.scaling == 'std': self.scaler = AutoformerStdScaler(config) else: self.scaler = AutoformerNOPScaler(config) if config.num_static_categorical_features > 0: self.embedder = AutoformerFeatureEmbedder(cardinalities=config.cardinality, embedding_dims=config.embedding_dimension) self.encoder = AutoformerEncoder(config) self.decoder = AutoformerDecoder(config) self.decomposition_layer = AutoformerSeriesDecompositionLayer(config) self.post_init() @property def _past_length(self) -> int: return self.config.context_length + max(self.config.lags_sequence) def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor: indices = [lag - shift for lag in self.config.lags_sequence] sequence_length = sequence.shape[1] if max(indices) + subsequences_length > sequence_length: raise ValueError(f'lags cannot go further than history length, found lag {max(indices)} while history length is only {sequence_length}') lagged_values = [] for lag_index in indices: begin_index = -lag_index - subsequences_length end_index = -lag_index if lag_index > 0 else None lagged_values.append(sequence[:, begin_index:end_index, ...]) return torch.stack(lagged_values, dim=-1) def create_network_inputs(self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: 
Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: time_feat = torch.cat((past_time_features[:, self._past_length - self.config.context_length:, ...], future_time_features), dim=1) if future_values is not None else past_time_features[:, self._past_length - self.config.context_length:, ...] if past_observed_mask is None: past_observed_mask = torch.ones_like(past_values) context = past_values[:, -self.config.context_length:] observed_context = past_observed_mask[:, -self.config.context_length:] (_, loc, scale) = self.scaler(context, observed_context) inputs = (torch.cat((past_values, future_values), dim=1) - loc) / scale if future_values is not None else (past_values - loc) / scale log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p() log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log() static_feat = torch.cat((log_abs_loc, log_scale), dim=1) if static_real_features is not None: static_feat = torch.cat((static_real_features, static_feat), dim=1) if static_categorical_features is not None: embedded_cat = self.embedder(static_categorical_features) static_feat = torch.cat((embedded_cat, static_feat), dim=1) expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1) features = torch.cat((expanded_static_feat, time_feat), dim=-1) subsequences_length = self.config.context_length + self.config.prediction_length if future_values is not None else self.config.context_length lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length) lags_shape = lagged_sequence.shape reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]: raise ValueError(f'input length {reshaped_lagged_sequence.shape[1]} and time feature lengths {time_feat.shape[1]} does not match') return (reshaped_lagged_sequence, features, loc, scale, static_feat) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(AUTOFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=AutoformerModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[AutoformerModelOutput, Tuple]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict (transformer_inputs, temporal_features, loc, scale, static_feat) = 
self.create_network_inputs(past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features) if encoder_outputs is None: enc_input = torch.cat((transformer_inputs[:, :self.config.context_length, ...], temporal_features[:, :self.config.context_length, ...]), dim=-1) encoder_outputs = self.encoder(inputs_embeds=enc_input, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) if future_values is not None: (seasonal_input, trend_input) = self.decomposition_layer(transformer_inputs[:, :self.config.context_length, ...]) mean = torch.mean(transformer_inputs[:, :self.config.context_length, ...], dim=1).unsqueeze(1).repeat(1, self.config.prediction_length, 1) zeros = torch.zeros([transformer_inputs.shape[0], self.config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device) decoder_input = torch.cat((torch.cat((seasonal_input[:, -self.config.label_length:, ...], zeros), dim=1), temporal_features[:, self.config.context_length - self.config.label_length:, ...]), dim=-1) trend_init = torch.cat((torch.cat((trend_input[:, -self.config.label_length:, ...], mean), dim=1), temporal_features[:, self.config.context_length - self.config.label_length:, ...]), dim=-1) decoder_outputs = self.decoder(trend=trend_init, inputs_embeds=decoder_input, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) else: decoder_outputs = AutoFormerDecoderOutput() if not return_dict: return decoder_outputs + encoder_outputs + (loc, scale, static_feat) return AutoformerModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, trend=decoder_outputs.trend, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, loc=loc, scale=scale, static_features=static_feat) @add_start_docstrings('The Autoformer Model with a distribution head on top for time-series forecasting.', AUTOFORMER_START_DOCSTRING) class AutoformerForPrediction(AutoformerPreTrainedModel): def __init__(self, config: AutoformerConfig): super().__init__(config) self.model = AutoformerModel(config) if config.distribution_output == 'student_t': self.distribution_output = StudentTOutput(dim=config.input_size) elif config.distribution_output == 'normal': self.distribution_output = NormalOutput(dim=config.input_size) elif config.distribution_output == 'negative_binomial': self.distribution_output = NegativeBinomialOutput(dim=config.input_size) else: raise ValueError(f'Unknown distribution output {config.distribution_output}') 
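# The selected distribution head provides both the projection from decoder features to distribution parameters (`parameter_projection` below) and the target's event shape (`target_shape`); during training the projected parameters are turned into a `torch.distributions.Distribution` and scored with the negative log-likelihood loss.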
self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.feature_size) self.target_shape = self.distribution_output.event_shape if config.loss == 'nll': self.loss = nll else: raise ValueError(f'Unknown loss function {config.loss}') self.post_init() def output_params(self, decoder_output): return self.parameter_projection(decoder_output[:, -self.config.prediction_length:, :]) def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() @torch.jit.ignore def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution: sliced_params = params if trailing_n is not None: sliced_params = [p[:, -trailing_n:] for p in params] return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale) @add_start_docstrings_to_model_forward(AUTOFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqTSPredictionOutput, config_class=_CONFIG_FOR_DOC) def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, future_observed_mask: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Seq2SeqTSPredictionOutput, Tuple]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if future_values is not None: use_cache = False outputs = self.model(past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, use_cache=use_cache, return_dict=return_dict) prediction_loss = None params = None if future_values is not None: params = self.output_params(outputs[0] + outputs[1]) distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2]) loss = self.loss(distribution, future_values) if future_observed_mask is None: future_observed_mask = torch.ones_like(future_values) if len(self.target_shape) == 0: loss_weights = future_observed_mask else: (loss_weights, _) = future_observed_mask.min(dim=-1, keepdim=False) prediction_loss = weighted_average(loss, weights=loss_weights) if not return_dict: outputs = (params,) + outputs[2:] if params is not None else outputs[2:] return (prediction_loss,) + outputs if prediction_loss is not None else outputs return Seq2SeqTSPredictionOutput(loss=prediction_loss, params=params, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, 
decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, loc=outputs.loc, scale=outputs.scale, static_features=outputs.static_features) @torch.no_grad() def generate(self, past_values: torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> SampleTSPredictionOutput: outputs = self(static_categorical_features=static_categorical_features, static_real_features=static_real_features, past_time_features=past_time_features, past_values=past_values, past_observed_mask=past_observed_mask, future_time_features=None, future_values=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, use_cache=False) decoder = self.model.get_decoder() enc_last_hidden = outputs.encoder_last_hidden_state loc = outputs.loc scale = outputs.scale static_feat = outputs.static_features num_parallel_samples = self.config.num_parallel_samples repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_past_values = (past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc) / repeated_scale time_features = torch.cat((past_time_features, future_time_features), dim=1) expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_features.shape[1], -1) features = torch.cat((expanded_static_feat, time_features), dim=-1) repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0) lagged_sequence = self.model.get_lagged_subsequences(sequence=repeated_past_values, subsequences_length=self.config.context_length) lags_shape = lagged_sequence.shape reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) (seasonal_input, trend_input) = self.model.decomposition_layer(reshaped_lagged_sequence) mean = torch.mean(reshaped_lagged_sequence, dim=1).unsqueeze(1).repeat(1, self.config.prediction_length, 1) zeros = torch.zeros([reshaped_lagged_sequence.shape[0], self.config.prediction_length, reshaped_lagged_sequence.shape[2]], device=reshaped_lagged_sequence.device) decoder_input = torch.cat((torch.cat((seasonal_input[:, -self.config.label_length:, ...], zeros), dim=1), repeated_features[:, -self.config.prediction_length - self.config.label_length:, ...]), dim=-1) trend_init = torch.cat((torch.cat((trend_input[:, -self.config.label_length:, ...], mean), dim=1), repeated_features[:, -self.config.prediction_length - self.config.label_length:, ...]), dim=-1) decoder_outputs = decoder(trend=trend_init, inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden) decoder_last_hidden = decoder_outputs.last_hidden_state trend = decoder_outputs.trend params = self.output_params(decoder_last_hidden + trend) distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale) future_samples = distr.sample() return SampleTSPredictionOutput(sequences=future_samples.reshape((-1, num_parallel_samples, self.config.prediction_length) + self.target_shape)) 
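The Autoformer classes above are easiest to follow with a small end-to-end call. The sketch below is illustrative and not part of the model file: it builds a randomly initialized AutoformerForPrediction from a config, runs a training-style forward pass (which returns the negative log-likelihood loss), and then samples forecasts with generate(). The config values and tensor contents are arbitrary assumptions; only the shapes matter, and they follow AUTOFORMER_INPUTS_DOCSTRING (the past length is context_length + max(lags_sequence)).

import torch
from transformers import AutoformerConfig, AutoformerForPrediction

# Illustrative configuration; real use cases load a trained checkpoint instead.
config = AutoformerConfig(
    prediction_length=12,
    context_length=24,
    lags_sequence=[1, 2, 3],
    num_time_features=2,
)
model = AutoformerForPrediction(config)

batch_size = 4
past_length = config.context_length + max(config.lags_sequence)  # 27 here

past_values = torch.randn(batch_size, past_length)
past_time_features = torch.randn(batch_size, past_length, config.num_time_features)
past_observed_mask = torch.ones_like(past_values)  # 1 = observed, 0 = missing

# Training-style forward pass: providing future_values yields `outputs.loss` (NLL).
future_values = torch.randn(batch_size, config.prediction_length)
future_time_features = torch.randn(batch_size, config.prediction_length, config.num_time_features)
outputs = model(
    past_values=past_values,
    past_time_features=past_time_features,
    past_observed_mask=past_observed_mask,
    future_values=future_values,
    future_time_features=future_time_features,
)
outputs.loss.backward()

# Inference: `generate` draws config.num_parallel_samples trajectories per series.
predictions = model.generate(
    past_values=past_values,
    past_time_features=past_time_features,
    past_observed_mask=past_observed_mask,
    future_time_features=future_time_features,
)
print(predictions.sequences.shape)  # (batch_size, num_parallel_samples, prediction_length)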
# File: transformers-main/src/transformers/models/bark/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_bark': ['BarkCoarseConfig', 'BarkConfig', 'BarkFineConfig', 'BarkSemanticConfig'], 'processing_bark': ['BarkProcessor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_bark'] = ['BarkFineModel', 'BarkSemanticModel', 'BarkCoarseModel', 'BarkModel', 'BarkPreTrainedModel', 'BarkCausalModel'] if TYPE_CHECKING: from .configuration_bark import BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig from .processing_bark import BarkProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bark import BarkCausalModel, BarkCoarseModel, BarkFineModel, BarkModel, BarkPreTrainedModel, BarkSemanticModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/bark/configuration_bark.py """""" import os from typing import Dict, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings, logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) BARK_SUBMODELCONFIG_START_DOCSTRING = '\n This is the configuration class to store the configuration of a [`{model}`]. It is used to instantiate the model\n according to the specified arguments, defining the model architecture. Instantiating a configuration with the\n defaults will yield a similar configuration to that of the Bark [suno/bark](https://huggingface.co/suno/bark)\n architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n block_size (`int`, *optional*, defaults to 1024):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n input_vocab_size (`int`, *optional*, defaults to 10_048):\n Vocabulary size of a Bark sub-model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`{model}`]. Defaults to 10_048 but should be carefully thought with\n regards to the chosen sub-model.\n output_vocab_size (`int`, *optional*, defaults to 10_048):\n Output vocabulary size of a Bark sub-model. Defines the number of different tokens that can be represented\n by the: `output_ids` when passing forward a [`{model}`]. 
Defaults to 10_048 but should be carefully thought\n with regards to the chosen sub-model.\n num_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the given sub-model.\n num_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer architecture.\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the "intermediate" (often named feed-forward) layer in the architecture.\n dropout (`float`, *optional*, defaults to 0.0):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n bias (`bool`, *optional*, defaults to `True`):\n Whether or not to use bias in the linear layers and layer norm layers.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n' class BarkSubModelConfig(PretrainedConfig): model_type = 'bark_module' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', 'vocab_size': 'input_vocab_size', 'window_size': 'block_size'} def __init__(self, block_size=1024, input_vocab_size=10048, output_vocab_size=10048, num_layers=12, num_heads=12, hidden_size=768, dropout=0.0, bias=True, initializer_range=0.02, use_cache=True, **kwargs): self.block_size = block_size self.input_vocab_size = input_vocab_size self.output_vocab_size = output_vocab_size self.num_layers = num_layers self.num_heads = num_heads self.hidden_size = hidden_size self.dropout = dropout self.bias = bias self.use_cache = use_cache self.initializer_range = initializer_range super().__init__(**kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]]=None, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str='main', **kwargs) -> 'PretrainedConfig': kwargs['cache_dir'] = cache_dir kwargs['force_download'] = force_download kwargs['local_files_only'] = local_files_only kwargs['revision'] = revision cls._set_token_in_kwargs(kwargs, token) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'bark': config_dict = config_dict[f'{cls.model_type}_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) @add_start_docstrings(BARK_SUBMODELCONFIG_START_DOCSTRING.format(config='BarkSemanticConfig', model='BarkSemanticModel'), '\n Example:\n\n ```python\n >>> from transformers import BarkSemanticConfig, BarkSemanticModel\n\n >>> # Initializing a Bark sub-module style configuration\n >>> configuration = BarkSemanticConfig()\n\n >>> # Initializing a model (with random weights) from the suno/bark style configuration\n >>> model = BarkSemanticModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```') class BarkSemanticConfig(BarkSubModelConfig): model_type = 'semantic' @add_start_docstrings(BARK_SUBMODELCONFIG_START_DOCSTRING.format(config='BarkCoarseConfig', model='BarkCoarseModel'), '\n Example:\n\n ```python\n >>> from transformers import BarkCoarseConfig, BarkCoarseModel\n\n >>> # Initializing a Bark sub-module style configuration\n >>> configuration = BarkCoarseConfig()\n\n >>> # Initializing a model (with random weights) from the suno/bark style configuration\n >>> model = BarkCoarseModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```') class BarkCoarseConfig(BarkSubModelConfig): model_type = 'coarse_acoustics' @add_start_docstrings(BARK_SUBMODELCONFIG_START_DOCSTRING.format(config='BarkFineConfig', model='BarkFineModel'), '\n n_codes_total (`int`, *optional*, defaults to 8):\n The total number of audio codebooks predicted. Used in the fine acoustics sub-model.\n n_codes_given (`int`, *optional*, defaults to 1):\n The number of audio codebooks predicted in the coarse acoustics sub-model. Used in the acoustics\n sub-models.\n Example:\n\n ```python\n >>> from transformers import BarkFineConfig, BarkFineModel\n\n >>> # Initializing a Bark sub-module style configuration\n >>> configuration = BarkFineConfig()\n\n >>> # Initializing a model (with random weights) from the suno/bark style configuration\n >>> model = BarkFineModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```') class BarkFineConfig(BarkSubModelConfig): model_type = 'fine_acoustics' def __init__(self, tie_word_embeddings=True, n_codes_total=8, n_codes_given=1, **kwargs): self.n_codes_total = n_codes_total self.n_codes_given = n_codes_given super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) class BarkConfig(PretrainedConfig): model_type = 'bark' def __init__(self, semantic_config: Dict=None, coarse_acoustics_config: Dict=None, fine_acoustics_config: Dict=None, codec_config: Dict=None, initializer_range=0.02, **kwargs): if semantic_config is None: semantic_config = {} logger.info('semantic_config is None. initializing the semantic model with default values.') if coarse_acoustics_config is None: coarse_acoustics_config = {} logger.info('coarse_acoustics_config is None. initializing the coarse model with default values.') if fine_acoustics_config is None: fine_acoustics_config = {} logger.info('fine_acoustics_config is None. initializing the fine model with default values.') if codec_config is None: codec_config = {} logger.info('codec_config is None. 
initializing the codec model with default values.') self.semantic_config = BarkSemanticConfig(**semantic_config) self.coarse_acoustics_config = BarkCoarseConfig(**coarse_acoustics_config) self.fine_acoustics_config = BarkFineConfig(**fine_acoustics_config) codec_model_type = codec_config['model_type'] if 'model_type' in codec_config else 'encodec' self.codec_config = CONFIG_MAPPING[codec_model_type](**codec_config) self.initializer_range = initializer_range super().__init__(**kwargs) @classmethod def from_sub_model_configs(cls, semantic_config: BarkSemanticConfig, coarse_acoustics_config: BarkCoarseConfig, fine_acoustics_config: BarkFineConfig, codec_config: PretrainedConfig, **kwargs): return cls(semantic_config=semantic_config.to_dict(), coarse_acoustics_config=coarse_acoustics_config.to_dict(), fine_acoustics_config=fine_acoustics_config.to_dict(), codec_config=codec_config.to_dict(), **kwargs) # File: transformers-main/src/transformers/models/bark/convert_suno_to_hf.py """""" import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig from transformers.models.bark.generation_configuration_bark import BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) set_seed(770) new_layer_name_dict = {'c_attn': 'att_proj', 'c_proj': 'out_proj', 'c_fc': 'in_proj', 'transformer.': '', 'h.': 'layers.', 'ln_1': 'layernorm_1', 'ln_2': 'layernorm_2', 'ln_f': 'layernorm_final', 'wpe': 'position_embeds_layer', 'wte': 'input_embeds_layer'} REMOTE_MODEL_PATHS = {'text_small': {'repo_id': 'suno/bark', 'file_name': 'text.pt'}, 'coarse_small': {'repo_id': 'suno/bark', 'file_name': 'coarse.pt'}, 'fine_small': {'repo_id': 'suno/bark', 'file_name': 'fine.pt'}, 'text': {'repo_id': 'suno/bark', 'file_name': 'text_2.pt'}, 'coarse': {'repo_id': 'suno/bark', 'file_name': 'coarse_2.pt'}, 'fine': {'repo_id': 'suno/bark', 'file_name': 'fine_2.pt'}} CUR_PATH = os.path.dirname(os.path.abspath(__file__)) default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache') CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0') def _get_ckpt_path(model_type, use_small=False): key = model_type if use_small: key += '_small' return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]['file_name']) def _download(from_hf_path, file_name): os.makedirs(CACHE_DIR, exist_ok=True) hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR) def _load_model(ckpt_path, device, use_small=False, model_type='text'): if model_type == 'text': ModelClass = BarkSemanticModel ConfigClass = BarkSemanticConfig GenerationConfigClass = BarkSemanticGenerationConfig elif model_type == 'coarse': ModelClass = BarkCoarseModel ConfigClass = BarkCoarseConfig GenerationConfigClass = BarkCoarseGenerationConfig elif model_type == 'fine': ModelClass = BarkFineModel ConfigClass = BarkFineConfig GenerationConfigClass = BarkFineGenerationConfig else: raise NotImplementedError() model_key = f'{model_type}_small' if use_small else model_type model_info = 
REMOTE_MODEL_PATHS[model_key] if not os.path.exists(ckpt_path): logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.') _download(model_info['repo_id'], model_info['file_name']) checkpoint = torch.load(ckpt_path, map_location=device) model_args = checkpoint['model_args'] if 'input_vocab_size' not in model_args: model_args['input_vocab_size'] = model_args['vocab_size'] model_args['output_vocab_size'] = model_args['vocab_size'] del model_args['vocab_size'] model_args['num_heads'] = model_args.pop('n_head') model_args['hidden_size'] = model_args.pop('n_embd') model_args['num_layers'] = model_args.pop('n_layer') model_config = ConfigClass(**checkpoint['model_args']) model = ModelClass(config=model_config) model_generation_config = GenerationConfigClass() model.generation_config = model_generation_config state_dict = checkpoint['model'] unwanted_prefix = '_orig_mod.' for (k, v) in list(state_dict.items()): if k.startswith(unwanted_prefix): new_k = k[len(unwanted_prefix):] for old_layer_name in new_layer_name_dict: new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name]) state_dict[new_k] = state_dict.pop(k) extra_keys = set(state_dict.keys()) - set(model.state_dict().keys()) extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias')} missing_keys = set(model.state_dict().keys()) - set(state_dict.keys()) missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias')} if len(extra_keys) != 0: raise ValueError(f'extra keys found: {extra_keys}') if len(missing_keys) != 0: raise ValueError(f'missing keys: {missing_keys}') model.load_state_dict(state_dict, strict=False) n_params = model.num_parameters(exclude_embeddings=True) val_loss = checkpoint['best_val_loss'].item() logger.info(f'model loaded: {round(n_params / 1000000.0, 1)}M params, {round(val_loss, 3)} loss') model.eval() model.to(device) del checkpoint, state_dict return model def load_model(pytorch_dump_folder_path, use_small=False, model_type='text'): if model_type not in ('text', 'coarse', 'fine'): raise NotImplementedError() device = 'cpu' ckpt_path = _get_ckpt_path(model_type, use_small=use_small) model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small) bark_model = _bark_load_model(ckpt_path, 'cpu', model_type=model_type, use_small=use_small) if model_type == 'text': bark_model = bark_model['model'] if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params(): raise ValueError("initial and new models don't have the same number of parameters") batch_size = 5 sequence_length = 10 if model_type in ['text', 'coarse']: vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int) output_old_model = bark_model(vec)[0] output_new_model_total = model(vec) output_new_model = output_new_model_total.logits[:, [-1], :] else: prediction_codeboook_channel = 3 n_codes_total = 8 vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int) output_new_model_total = model(prediction_codeboook_channel, vec) output_old_model = bark_model(prediction_codeboook_channel, vec) output_new_model = output_new_model_total.logits if output_new_model.shape != output_old_model.shape: raise ValueError("initial and new outputs don't have the same shape") if (output_new_model - output_old_model).abs().max().item() > 0.001: raise ValueError('initial and new outputs are not equal') Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) def load_whole_bark_model(semantic_path, coarse_path, 
fine_path, append_text, hub_path, folder_path): pytorch_dump_folder_path = os.path.join(folder_path, append_text) semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, 'config.json')) coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, 'config.json')) fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, 'config.json')) codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz') semantic = BarkSemanticModel.from_pretrained(semantic_path) coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path) fineAcoustic = BarkFineModel.from_pretrained(fine_path) codec = EncodecModel.from_pretrained('facebook/encodec_24khz') bark_config = BarkConfig.from_sub_model_configs(semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig) bark_generation_config = BarkGenerationConfig.from_sub_model_configs(semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config) bark = BarkModel(bark_config) bark.semantic = semantic bark.coarse_acoustics = coarseAcoustic bark.fine_acoustics = fineAcoustic bark.codec_model = codec bark.generation_config = bark_generation_config Path(pytorch_dump_folder_path).mkdir(exist_ok=True) bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('model_type', type=str, help='text, coarse or fine.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.') args = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small) # File: transformers-main/src/transformers/models/bark/generation_configuration_bark.py """""" import copy from typing import Dict from ...generation.configuration_utils import GenerationConfig from ...utils import logging logger = logging.get_logger(__name__) class BarkSemanticGenerationConfig(GenerationConfig): model_type = 'semantic' def __init__(self, eos_token_id=10000, renormalize_logits=True, max_new_tokens=768, output_scores=False, return_dict_in_generate=False, output_hidden_states=False, output_attentions=False, temperature=1.0, do_sample=False, text_encoding_offset=10048, text_pad_token=129595, semantic_infer_token=129599, semantic_vocab_size=10000, max_input_semantic_length=256, semantic_rate_hz=49.9, min_eos_p=None, **kwargs): super().__init__(temperature=temperature, do_sample=do_sample, eos_token_id=eos_token_id, renormalize_logits=renormalize_logits, max_new_tokens=max_new_tokens, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, output_hidden_states=output_hidden_states, output_attentions=output_attentions, **kwargs) self.text_encoding_offset = text_encoding_offset self.text_pad_token = text_pad_token self.semantic_pad_token = eos_token_id self.semantic_infer_token = semantic_infer_token self.semantic_vocab_size = semantic_vocab_size self.max_input_semantic_length = max_input_semantic_length self.semantic_rate_hz = semantic_rate_hz self.min_eos_p = min_eos_p class BarkCoarseGenerationConfig(GenerationConfig): model_type = 'coarse_acoustics' def __init__(self, renormalize_logits=True, output_scores=False, return_dict_in_generate=False, output_hidden_states=False, output_attentions=False, temperature=1.0, do_sample=False, coarse_semantic_pad_token=12048, 
coarse_rate_hz=75, n_coarse_codebooks=2, coarse_infer_token=12050, max_coarse_input_length=256, max_coarse_history: int=630, sliding_window_len: int=60, **kwargs): super().__init__(temperature=temperature, do_sample=do_sample, renormalize_logits=renormalize_logits, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, output_hidden_states=output_hidden_states, output_attentions=output_attentions, **kwargs) self.coarse_semantic_pad_token = coarse_semantic_pad_token self.coarse_rate_hz = coarse_rate_hz self.n_coarse_codebooks = n_coarse_codebooks self.coarse_infer_token = coarse_infer_token self.max_coarse_input_length = max_coarse_input_length self.max_coarse_history = max_coarse_history self.sliding_window_len = sliding_window_len class BarkFineGenerationConfig(GenerationConfig): model_type = 'fine_acoustics' def __init__(self, temperature=1.0, max_fine_history_length=512, max_fine_input_length=1024, n_fine_codebooks=8, **kwargs): super().__init__(temperature=temperature) self.max_fine_history_length = max_fine_history_length self.max_fine_input_length = max_fine_input_length self.n_fine_codebooks = n_fine_codebooks def validate(self, **kwargs): pass class BarkGenerationConfig(GenerationConfig): model_type = 'bark' is_composition = True def __init__(self, semantic_config: Dict=None, coarse_acoustics_config: Dict=None, fine_acoustics_config: Dict=None, sample_rate=24000, codebook_size=1024, **kwargs): if semantic_config is None: semantic_config = {} logger.info('semantic_config is None. initializing the semantic model with default values.') if coarse_acoustics_config is None: coarse_acoustics_config = {} logger.info('coarse_acoustics_config is None. initializing the coarse model with default values.') if fine_acoustics_config is None: fine_acoustics_config = {} logger.info('fine_acoustics_config is None. 
initializing the fine model with default values.') self.semantic_config = BarkSemanticGenerationConfig(**semantic_config) self.coarse_acoustics_config = BarkCoarseGenerationConfig(**coarse_acoustics_config) self.fine_acoustics_config = BarkFineGenerationConfig(**fine_acoustics_config) self.sample_rate = sample_rate self.codebook_size = codebook_size @classmethod def from_sub_model_configs(cls, semantic_config: BarkSemanticGenerationConfig, coarse_acoustics_config: BarkCoarseGenerationConfig, fine_acoustics_config: BarkFineGenerationConfig, **kwargs): return cls(semantic_config=semantic_config.to_dict(), coarse_acoustics_config=coarse_acoustics_config.to_dict(), fine_acoustics_config=fine_acoustics_config.to_dict(), **kwargs) def to_dict(self): output = copy.deepcopy(self.__dict__) output['semantic_config'] = self.semantic_config.to_dict() output['coarse_acoustics_config'] = self.coarse_acoustics_config.to_dict() output['fine_acoustics_config'] = self.fine_acoustics_config.to_dict() output['model_type'] = self.__class__.model_type return output # File: transformers-main/src/transformers/models/bark/modeling_bark.py """""" import math from typing import Dict, Optional, Tuple, Union import numpy as np import torch from torch import nn from torch.nn import functional as F from ...generation.logits_process import AlternatingCodebooksLogitsProcessor, BarkEosPrioritizerLogitsProcessor, SuppressTokensLogitsProcessor from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import CausalLMOutputWithPast, MaskedLMOutput from ...modeling_utils import PreTrainedModel, get_parameter_device from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, is_accelerate_available, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging from ..auto import AutoModel from .configuration_bark import BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, BarkSubModelConfig from .generation_configuration_bark import BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkSemanticGenerationConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'suno/bark-small' _CONFIG_FOR_DOC = 'BarkConfig' class BarkSelfAttention(nn.Module): def __init__(self, config, is_causal=False): super().__init__() self.dropout = config.dropout self.attn_dropout = nn.Dropout(config.dropout) self.resid_dropout = nn.Dropout(config.dropout) self.embed_dim = config.hidden_size self.num_heads = config.num_heads self.head_dim = self.embed_dim // self.num_heads if config.hidden_size % config.num_heads != 0: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.att_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.bias) self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.bias) self.is_causal = is_causal if is_causal: block_size = config.block_size bias = torch.tril(torch.ones((block_size, block_size), dtype=bool)).view(1, 1, block_size, block_size) self.register_buffer('bias', bias) def _split_heads(self, tensor, num_heads, attn_head_size): new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) return tensor.permute(0, 2, 1, 3) def _merge_heads(self, tensor, num_heads, attn_head_size): tensor = tensor.transpose(1, 2).contiguous() tensor = tensor.view(tensor.size()[:-2] + (num_heads * 
attn_head_size,)) return tensor def _attn(self, query, key, value, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * (1.0 / math.sqrt(self.head_dim)) if self.is_causal: (query_length, key_length) = (query.size(-2), key.size(-2)) attn_weights = attn_weights.masked_fill(self.bias[:, :, key_length - query_length:key_length, :key_length] == 0, torch.finfo(attn_weights.dtype).min) if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.to(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return (attn_output, attn_weights) def forward(self, hidden_states, attention_mask=None, past_key_values=None, head_mask=None, use_cache=False, output_attentions=False): (query, key, value) = self.att_proj(hidden_states).split(self.embed_dim, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if past_key_values is not None: past_key = past_key_values[0] past_value = past_key_values[1] key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: present = (key, value) else: present = None (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs class BarkSelfFlashAttention2(BarkSelfAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def _split_heads(self, tensor, num_heads, attn_head_size): new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) return tensor def _merge_heads(self, tensor, num_heads, attn_head_size): tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,)) return tensor def forward(self, hidden_states, attention_mask=None, past_key_values=None, head_mask=None, use_cache=False, output_attentions=False): (batch_size, query_len, _) = hidden_states.size() (query, key, value) = self.att_proj(hidden_states).split(self.embed_dim, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if past_key_values is not None: past_key = past_key_values[0].transpose(1, 2) past_value = past_key_values[1].transpose(1, 2) key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache is True: present = (key.transpose(1, 2), value.transpose(1, 2)) else: present = None attn_output = _flash_attention_forward(query, key, value, attention_mask, query_len, dropout=self.dropout, use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: attn_weights = None outputs += (attn_weights,) return outputs 
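# Illustrative sketch (not part of the original file): the eager attention above keeps heads in the
# (batch, num_heads, seq_len, head_dim) layout, while the flash-attention variant leaves tensors as
# (batch, seq_len, num_heads, head_dim), the layout expected by `_flash_attention_forward`; its cached
# key/value tensors are transposed back so both implementations share the same `past_key_values` layout.
# A minimal, self-contained shape check of that convention, assuming only `torch` (no Bark weights):
import torch

_batch, _seq_len, _num_heads, _head_dim = 2, 16, 4, 8
_hidden = torch.randn(_batch, _seq_len, _num_heads * _head_dim)
# eager layout: split the last axis, then move the head axis in front of the sequence axis
_eager = _hidden.view(_batch, _seq_len, _num_heads, _head_dim).permute(0, 2, 1, 3)
# flash-attention layout: split only, heads stay behind the sequence axis
_flash = _hidden.view(_batch, _seq_len, _num_heads, _head_dim)
assert _eager.shape == (_batch, _num_heads, _seq_len, _head_dim)
assert _flash.shape == (_batch, _seq_len, _num_heads, _head_dim)
assert torch.equal(_eager, _flash.permute(0, 2, 1, 3))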
BARK_ATTENTION_CLASSES = {'eager': BarkSelfAttention, 'flash_attention_2': BarkSelfFlashAttention2} class BarkLayerNorm(nn.Module): def __init__(self, hidden_size, bias=True): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) if bias else None def forward(self, input): return F.layer_norm(input, self.weight.shape, self.weight, self.bias, eps=1e-05) class BarkMLP(nn.Module): def __init__(self, config): super().__init__() self.in_proj = nn.Linear(config.hidden_size, 4 * config.hidden_size, bias=config.bias) self.out_proj = nn.Linear(4 * config.hidden_size, config.hidden_size, bias=config.bias) self.dropout = nn.Dropout(config.dropout) self.gelu = nn.GELU() def forward(self, hidden_states): hidden_states = self.in_proj(hidden_states) hidden_states = self.gelu(hidden_states) hidden_states = self.out_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class BarkBlock(nn.Module): def __init__(self, config, is_causal=False): super().__init__() if is_causal: self.layernorm_1 = BarkLayerNorm(config.hidden_size, bias=config.bias) self.layernorm_2 = BarkLayerNorm(config.hidden_size, bias=config.bias) else: self.layernorm_1 = nn.LayerNorm(config.hidden_size) self.layernorm_2 = nn.LayerNorm(config.hidden_size) self.attn = BARK_ATTENTION_CLASSES[config._attn_implementation](config, is_causal=is_causal) self.mlp = BarkMLP(config) def forward(self, hidden_states, past_key_values=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False): intermediary_hidden_states = self.layernorm_1(hidden_states) attn_outputs = self.attn(intermediary_hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions) attn_output = attn_outputs[0] outputs = attn_outputs[1:] intermediary_hidden_states = hidden_states + attn_output intermediary_hidden_states = intermediary_hidden_states + self.mlp(self.layernorm_2(intermediary_hidden_states)) if use_cache: outputs = (intermediary_hidden_states,) + outputs else: outputs = (intermediary_hidden_states,) + outputs[1:] return outputs class BarkPreTrainedModel(PreTrainedModel): config_class = BarkConfig supports_gradient_checkpointing = False _supports_flash_attn_2 = True def _init_weights(self, module): if isinstance(module, (nn.Linear,)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) @property def device(self) -> torch.device: if not hasattr(self, '_hf_hook'): return get_parameter_device(self) for module in self.modules(): if hasattr(module, '_hf_hook') and hasattr(module._hf_hook, 'execution_device') and (module._hf_hook.execution_device is not None): return torch.device(module._hf_hook.execution_device) return get_parameter_device(self) BARK_MODEL_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`{config}`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BARK_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`BarkConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BARK_FINE_INPUTS_DOCSTRING = "\n Args:\n codebook_idx (`int`):\n Index of the codebook that will be predicted.\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, number_of_codebooks)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it. Initially, indices of the first two codebooks are obtained from the `coarse` sub-model. The rest is\n predicted recursively by attending the previously predicted channels. The model predicts on windows of\n length 1024.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): NOT IMPLEMENTED YET.\n input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. If\n `past_key_values` is used, optionally only the last `input_embeds` have to be input (see\n `past_key_values`). 
This is useful if you want more control over how to convert `input_ids` indices into\n associated vectors than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" BARK_CAUSAL_MODEL_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `input_ids` of shape `(batch_size, sequence_length)`.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n Here, due to `Bark` particularities, if `past_key_values` is used, `input_embeds` will be ignored and you\n have to use `input_ids`. If `past_key_values` is not used and `use_cache` is set to `True`, `input_embeds`\n is used in priority instead of `input_ids`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" class BarkCausalModel(BarkPreTrainedModel): config_class = BarkSubModelConfig def __init__(self, config): super().__init__(config) self.config = config self.input_embeds_layer = nn.Embedding(config.input_vocab_size, config.hidden_size) self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) self.drop = nn.Dropout(config.dropout) self.layers = nn.ModuleList([BarkBlock(config, is_causal=True) for _ in range(config.num_layers)]) self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' self.layernorm_final = BarkLayerNorm(config.hidden_size, bias=config.bias) self.lm_head = nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.input_embeds_layer def set_input_embeddings(self, new_embeddings): self.input_embeds_layer = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs): input_embeds = kwargs.get('input_embeds', None) attention_mask = kwargs.get('attention_mask', None) position_ids = kwargs.get('position_ids', None) if past_key_values is not None: seq_len = input_ids.shape[1] past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] input_embeds = None elif input_embeds is not None and kwargs.get('use_cache'): seq_len = input_embeds.shape[1] else: seq_len = input_ids.shape[1] if attention_mask is not None: attention_mask = attention_mask[:, :seq_len] if position_ids is not None: position_ids = position_ids[:, :seq_len] if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] else: position_ids = None if input_embeds is not None and kwargs.get('use_cache'): return {'input_ids': None, 'input_embeds': input_embeds, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'position_ids': position_ids, 'attention_mask': attention_mask} return {'input_ids': input_ids, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'position_ids': position_ids, 'attention_mask': attention_mask} @add_start_docstrings_to_model_forward(BARK_CAUSAL_MODEL_INPUTS_DOCSTRING) def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[torch.FloatTensor]]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, input_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states 
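# Any generation flag left as None falls back to the default stored on the sub-model config.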
use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict loss = None if labels is not None: raise NotImplementedError('Training is not implemented yet for Bark - ensure you do not pass `labels` to the model.') if input_ids is not None and input_embeds is not None: raise ValueError('You cannot specify both input_ids and input_embeds at the same time') elif input_embeds is not None and past_key_values is None: pass elif input_ids is not None: input_embeds = self.input_embeds_layer(input_ids) elif input_embeds is not None: pass else: raise ValueError('You have to specify either input_ids or input_embeds') input_shape = input_embeds.size()[:-1] batch_size = input_embeds.shape[0] seq_length = input_shape[-1] device = input_ids.device if input_ids is not None else input_embeds.device if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.layers)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) position_embeds = self.position_embeds_layer(position_ids) if attention_mask is not None: if batch_size <= 0: raise ValueError('batch_size has to be defined and > 0') if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: attention_mask = attention_mask.view(batch_size, -1) attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1) head_mask = self.get_head_mask(head_mask, self.config.num_layers) hidden_states = self.drop(input_embeds + position_embeds) output_shape = input_shape + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False present_key_values = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, (block, past_layer_key_values)) in enumerate(zip(self.layers, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, None, attention_mask, head_mask[i], use_cache, output_attentions) else: outputs = block(hidden_states, past_key_values=past_layer_key_values, attention_mask=attention_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions) hidden_states = outputs[0] if use_cache: present_key_values = present_key_values + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.layernorm_final(hidden_states) hidden_states = hidden_states.view(output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) logits = self.lm_head(hidden_states) if not return_dict: return tuple((v for v in [None, logits, present_key_values, all_hidden_states, all_self_attentions] if v is not None)) return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions) @staticmethod def _reorder_cache(past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past_key_values)) @add_start_docstrings('Bark semantic (or text) model. 
It shares the same architecture as the coarse model.\n It is a GPT-2 like autoregressive model with a language modeling head on top.', BARK_MODEL_START_DOCSTRING.format(config='BarkSemanticConfig')) class BarkSemanticModel(BarkCausalModel): base_model_prefix = 'semantic' config_class = BarkSemanticConfig def generate(self, input_ids: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig=None, history_prompt: Optional[Dict[str, torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> torch.LongTensor: if semantic_generation_config is None: raise ValueError('`semantic_generation_config` has to be provided') batch_size = input_ids.shape[0] max_input_semantic_length = semantic_generation_config.max_input_semantic_length input_ids = input_ids + semantic_generation_config.text_encoding_offset if attention_mask is not None: input_ids = input_ids.masked_fill((1 - attention_mask).bool(), semantic_generation_config.text_pad_token) if history_prompt is not None: semantic_history = history_prompt['semantic_prompt'][-max_input_semantic_length:] semantic_history = nn.functional.pad(semantic_history, (0, max_input_semantic_length - len(semantic_history)), value=semantic_generation_config.semantic_pad_token, mode='constant') else: semantic_history = torch.tensor([semantic_generation_config.semantic_pad_token] * max_input_semantic_length, dtype=torch.int).to(self.device) semantic_history = torch.repeat_interleave(semantic_history[None], batch_size, dim=0) infer_array = torch.tensor([[semantic_generation_config.semantic_infer_token]] * batch_size, dtype=torch.int).to(self.device) input_embeds = torch.cat([self.input_embeds_layer(input_ids[:, :max_input_semantic_length]) + self.input_embeds_layer(semantic_history[:, :max_input_semantic_length + 1]), self.input_embeds_layer(infer_array)], dim=1) tokens_to_suppress = list(range(semantic_generation_config.semantic_vocab_size, semantic_generation_config.semantic_pad_token)) tokens_to_suppress.extend(list(range(semantic_generation_config.semantic_pad_token + 1, self.config.output_vocab_size))) suppress_tokens_logits_processor = SuppressTokensLogitsProcessor(tokens_to_suppress, device=input_ids.device) min_eos_p = kwargs.get('min_eos_p', semantic_generation_config.min_eos_p) early_stopping_logits_processor = BarkEosPrioritizerLogitsProcessor(eos_token_id=semantic_generation_config.eos_token_id, min_eos_p=min_eos_p, device=input_ids.device) semantic_output = super().generate(torch.ones((batch_size, max_input_semantic_length + 1), dtype=torch.int).to(self.device), input_embeds=input_embeds, logits_processor=[suppress_tokens_logits_processor, early_stopping_logits_processor], generation_config=semantic_generation_config, **kwargs) semantic_output = semantic_output[:, max_input_semantic_length + 1:] return semantic_output @add_start_docstrings('Bark coarse acoustics model.\n It shares the same architecture as the semantic (or text) model. 
It is a GPT-2 like autoregressive model with a\n language modeling head on top.', BARK_MODEL_START_DOCSTRING.format(config='BarkCoarseConfig')) class BarkCoarseModel(BarkCausalModel): base_model_prefix = 'coarse_acoustics' config_class = BarkCoarseConfig def preprocess_histories(self, max_coarse_history: int, semantic_to_coarse_ratio: int, batch_size: int, semantic_generation_config: int, codebook_size: int, history_prompt: Optional[Dict[str, torch.Tensor]]=None): if history_prompt is not None: x_semantic_history = torch.repeat_interleave(history_prompt['semantic_prompt'][None], batch_size, dim=0) x_coarse_history = history_prompt['coarse_prompt'].clone() if codebook_size is not None: for n in range(1, x_coarse_history.shape[0]): x_coarse_history[n, :] += codebook_size * n x_coarse_history = torch.transpose(x_coarse_history, 0, 1).reshape(-1) x_coarse_history = x_coarse_history + semantic_generation_config.semantic_vocab_size x_coarse_history = torch.repeat_interleave(x_coarse_history[None], batch_size, dim=0) max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) n_semantic_hist_provided = min([max_semantic_history, x_semantic_history.shape[1] - x_semantic_history.shape[1] % 2, int(np.floor(x_coarse_history.shape[1] / semantic_to_coarse_ratio))]) n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio)) x_semantic_history = x_semantic_history[:, -n_semantic_hist_provided:].int() x_coarse_history = x_coarse_history[:, -n_coarse_hist_provided:].int() x_coarse_history = x_coarse_history[:, :-2] else: x_semantic_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device) x_coarse_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device) return (x_semantic_history, x_coarse_history) def generate(self, semantic_output: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig=None, coarse_generation_config: BarkCoarseGenerationConfig=None, codebook_size: int=1024, history_prompt: Optional[Dict[str, torch.Tensor]]=None, return_output_lengths: Optional[bool]=None, **kwargs) -> Union[torch.LongTensor, Tuple[torch.LongTensor, torch.LongTensor]]: if semantic_generation_config is None: raise ValueError('`semantic_generation_config` has to be provided') if coarse_generation_config is None: raise ValueError('`coarse_generation_config` has to be provided') max_coarse_input_length = coarse_generation_config.max_coarse_input_length max_coarse_history = coarse_generation_config.max_coarse_history sliding_window_len = coarse_generation_config.sliding_window_len semantic_output.masked_fill_(semantic_output == semantic_generation_config.semantic_pad_token, coarse_generation_config.coarse_semantic_pad_token) semantic_to_coarse_ratio = coarse_generation_config.coarse_rate_hz / semantic_generation_config.semantic_rate_hz * coarse_generation_config.n_coarse_codebooks max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) output_lengths = (semantic_output != coarse_generation_config.coarse_semantic_pad_token).sum(1) output_lengths = torch.floor(output_lengths * semantic_to_coarse_ratio / coarse_generation_config.n_coarse_codebooks) output_lengths = torch.round(output_lengths * coarse_generation_config.n_coarse_codebooks).int() max_generated_len = torch.max(output_lengths).item() batch_size = semantic_output.shape[0] (x_semantic_history, x_coarse) = self.preprocess_histories(history_prompt=history_prompt, max_coarse_history=max_coarse_history, 
semantic_to_coarse_ratio=semantic_to_coarse_ratio, batch_size=batch_size, semantic_generation_config=semantic_generation_config, codebook_size=codebook_size) base_semantic_idx = x_semantic_history.shape[1] semantic_output = torch.hstack([x_semantic_history, semantic_output]) n_window_steps = int(np.ceil(max_generated_len / sliding_window_len)) total_generated_len = 0 len_coarse_history = x_coarse.shape[1] for _ in range(n_window_steps): semantic_idx = base_semantic_idx + int(round(total_generated_len / semantic_to_coarse_ratio)) input_coarse = semantic_output[:, np.max([0, semantic_idx - max_semantic_history]):] input_coarse = input_coarse[:, :max_coarse_input_length] input_coarse = F.pad(input_coarse, (0, max_coarse_input_length - input_coarse.shape[-1]), 'constant', coarse_generation_config.coarse_semantic_pad_token) input_coarse = torch.hstack([input_coarse, torch.tensor([[coarse_generation_config.coarse_infer_token]] * batch_size).to(self.device), x_coarse[:, -max_coarse_history:]]) alternatingLogitsProcessor = AlternatingCodebooksLogitsProcessor(input_coarse.shape[1], semantic_generation_config.semantic_vocab_size, codebook_size) output_coarse = super().generate(input_coarse, logits_processor=[alternatingLogitsProcessor], max_new_tokens=min(sliding_window_len, max_generated_len - total_generated_len), generation_config=coarse_generation_config, **kwargs) input_coarse_len = input_coarse.shape[1] x_coarse = torch.hstack([x_coarse, output_coarse[:, input_coarse_len:]]) total_generated_len = x_coarse.shape[1] - len_coarse_history del output_coarse coarse_output = x_coarse[:, len_coarse_history:] if return_output_lengths: return (coarse_output, output_lengths) return coarse_output @add_start_docstrings('Bark fine acoustics model. It is a non-causal GPT-like model with `config.n_codes_total` embedding layers and\n language modeling heads, one for each codebook.', BARK_MODEL_START_DOCSTRING.format(config='BarkFineConfig')) class BarkFineModel(BarkPreTrainedModel): base_model_prefix = 'fine_acoustics' config_class = BarkFineConfig main_input_name = 'codebook_idx' def __init__(self, config): super().__init__(config) self.config = config self.input_embeds_layers = nn.ModuleList([nn.Embedding(config.input_vocab_size, config.hidden_size) for _ in range(config.n_codes_total)]) self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) self.drop = nn.Dropout(config.dropout) self.layers = nn.ModuleList([BarkBlock(config, is_causal=False) for _ in range(config.num_layers)]) self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' self.layernorm_final = nn.LayerNorm(config.hidden_size) self.lm_heads = nn.ModuleList([nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) for _ in range(config.n_codes_given, config.n_codes_total)]) self.gradient_checkpointing = False self.n_codes_total = config.n_codes_total self.post_init() def get_input_embeddings(self): return self.input_embeds_layers def set_input_embeddings(self, new_embeddings): self.input_embeds_layers = new_embeddings def get_output_embeddings(self): return self.lm_heads def set_output_embeddings(self, new_output_embeddings): self.lm_heads = new_output_embeddings def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None): old_embeddings_list = self.get_input_embeddings() new_embeddings_list = nn.ModuleList([self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of) for old_embeddings in old_embeddings_list]) 
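# BarkFineModel keeps one embedding table per codebook, so every table in the ModuleList is resized; the lm_heads below are only resized when they are not tied to the input embeddings.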
self.set_input_embeddings(new_embeddings_list) new_num_tokens = new_embeddings_list[0].weight.shape[0] if self.get_output_embeddings() is not None and (not self.config.tie_word_embeddings): old_lm_head_list = self.get_output_embeddings() new_lm_head_list = nn.ModuleList([self._get_resized_lm_head(old_lm_head, new_num_tokens) for old_lm_head in old_lm_head_list]) self.set_output_embeddings(new_lm_head_list) return self.get_input_embeddings() def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of: Optional[int]=None) -> nn.Embedding: model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of) if new_num_tokens is None and pad_to_multiple_of is None: return model_embeds self.config.output_vocab_size = model_embeds[0].weight.shape[0] self.config.vocab_size = model_embeds[0].weight.shape[0] self.output_vocab_size = model_embeds[0].weight.shape[0] self.vocab_size = model_embeds[0].weight.shape[0] self.tie_weights() return model_embeds def _tie_weights(self): if getattr(self.config, 'tie_word_embeddings', True): self._tied_weights_keys = [] output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() for i in range(self.config.n_codes_total - self.config.n_codes_given): self._tie_or_clone_weights(output_embeddings[i], input_embeddings[i + 1]) self._tied_weights_keys.append(f'lm_heads.{i}.weight') def tie_weights(self): if getattr(self.config, 'tie_word_embeddings', True): self._tied_weights_keys = [] output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() for i in range(self.config.n_codes_total - self.config.n_codes_given): self._tie_or_clone_weights(output_embeddings[i], input_embeddings[i + 1]) self._tied_weights_keys.append(f'lm_heads.{i}.weight') for module in self.modules(): if hasattr(module, '_tie_weights'): module._tie_weights() @add_start_docstrings_to_model_forward(BARK_FINE_INPUTS_DOCSTRING) def forward(self, codebook_idx: int, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, input_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict loss = None if labels is not None: raise NotImplementedError('Training is not implemented yet') if codebook_idx == 0: raise ValueError('Cannot predict 0th codebook - 0th codebook should be predicted by the coarse model') if input_ids is not None and input_embeds is not None: raise ValueError('You cannot specify both input_ids and input_embeds at the same time') if input_ids is None and input_embeds is None: raise ValueError('You have to specify either input_ids or input_embeds') if input_ids is not None: input_embeds = [input_embeds_layer(input_ids[:, :, i]).unsqueeze(-1) for (i, input_embeds_layer) in enumerate(self.input_embeds_layers)] input_embeds = torch.cat(input_embeds, dim=-1) input_embeds = input_embeds[:, :, :, :codebook_idx + 1].sum(dim=-1) input_shape = input_embeds.size()[:-1] batch_size = 
input_embeds.shape[0] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else input_embeds.device if position_ids is None: position_ids = torch.arange(0, seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) position_embeds = self.position_embeds_layer(position_ids) if attention_mask is not None: if batch_size <= 0: raise ValueError('batch_size has to be defined and > 0') if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None else: attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1) head_mask = self.get_head_mask(head_mask, self.config.num_layers) hidden_states = self.drop(input_embeds + position_embeds) output_shape = input_shape + (hidden_states.size(-1),) all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, block) in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block(hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], output_attentions=output_attentions) hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) hidden_states = self.layernorm_final(hidden_states) hidden_states = hidden_states.view(output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) logits = self.lm_heads[codebook_idx - self.config.n_codes_given](hidden_states) if not return_dict: return tuple((v for v in [None, logits, all_hidden_states, all_self_attentions] if v is not None)) return MaskedLMOutput(loss=loss, logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions) def generate(self, coarse_output: torch.Tensor, semantic_generation_config: BarkSemanticGenerationConfig=None, coarse_generation_config: BarkCoarseGenerationConfig=None, fine_generation_config: BarkFineGenerationConfig=None, codebook_size: int=1024, history_prompt: Optional[Dict[str, torch.Tensor]]=None, **kwargs) -> torch.LongTensor: if semantic_generation_config is None: raise ValueError('`semantic_generation_config` has to be provided') if coarse_generation_config is None: raise ValueError('`coarse_generation_config` has to be provided') if fine_generation_config is None: raise ValueError('`fine_generation_config` has to be provided') temperature = kwargs.get('temperature', fine_generation_config.temperature) max_fine_history_length = fine_generation_config.max_fine_history_length max_fine_input_length = fine_generation_config.max_fine_input_length coarse_output = coarse_output.view(coarse_output.shape[0], -1, coarse_generation_config.n_coarse_codebooks) coarse_output = torch.remainder(coarse_output - semantic_generation_config.semantic_vocab_size, codebook_size) batch_size = coarse_output.shape[0] if history_prompt is not None: x_fine_history = torch.repeat_interleave(history_prompt['fine_prompt'].T[None], batch_size, dim=0) else: x_fine_history = None n_coarse = coarse_generation_config.n_coarse_codebooks fine_input = F.pad(coarse_output, (0, fine_generation_config.n_fine_codebooks - n_coarse), 'constant', codebook_size) if x_fine_history is not None: fine_input = torch.cat([x_fine_history[:, -max_fine_history_length:, :], fine_input], dim=1) n_history = x_fine_history[:, -max_fine_history_length:, :].shape[1] else: n_history = 0 n_remove_from_end = 0 if fine_input.shape[1] < max_fine_input_length: n_remove_from_end = max_fine_input_length - 
fine_input.shape[1] fine_input = F.pad(fine_input, (0, 0, 0, n_remove_from_end), mode='constant', value=codebook_size) n_loops = (coarse_output.shape[1] - (max_fine_input_length - n_history)) / max_fine_history_length n_loops = int(np.ceil(n_loops)) n_loops = max(0, n_loops) + 1 for n_outer in range(n_loops): start_idx = min([n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_input_length]) start_fill_idx = min([n_history + n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_history_length]) rel_start_fill_idx = start_fill_idx - start_idx input_buffer = fine_input[:, start_idx:start_idx + max_fine_input_length, :] for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): logits = self.forward(n_inner, input_buffer).logits if temperature is None or temperature == 1.0: relevant_logits = logits[:, rel_start_fill_idx:, :codebook_size] codebook_preds = torch.argmax(relevant_logits, -1) else: relevant_logits = logits[:, :, :codebook_size] / temperature probs = F.softmax(relevant_logits, dim=-1)[:, rel_start_fill_idx:max_fine_input_length] probs = probs.reshape((-1, codebook_size)) codebook_preds = torch.multinomial(probs, num_samples=1).view(batch_size, -1) codebook_preds = codebook_preds.to(torch.int32) input_buffer[:, rel_start_fill_idx:, n_inner] = codebook_preds del logits, codebook_preds for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): fine_input[:, start_fill_idx:start_fill_idx + (max_fine_input_length - rel_start_fill_idx), n_inner] = input_buffer[:, rel_start_fill_idx:, n_inner] del input_buffer fine_input = fine_input.transpose(1, 2)[:, :, n_history:] if n_remove_from_end > 0: fine_input = fine_input[:, :, :-n_remove_from_end] if fine_input.shape[-1] != coarse_output.shape[-2]: raise ValueError('input and output should have the same seq_len') return fine_input @add_start_docstrings("\n The full Bark model, a text-to-speech model composed of 4 sub-models:\n - [`BarkSemanticModel`] (also referred to as the 'text' model): a causal auto-regressive transformer model that\n takes\n as input tokenized text, and predicts semantic text tokens that capture the meaning of the text.\n - [`BarkCoarseModel`] (also refered to as the 'coarse acoustics' model), also a causal autoregressive transformer,\n that takes into input the results of the last model. 
It aims at regressing the first two audio codebooks necessary\n to `encodec`.\n - [`BarkFineModel`] (the 'fine acoustics' model), this time a non-causal autoencoder transformer, which iteratively\n predicts the last codebooks based on the sum of the previous codebooks embeddings.\n - having predicted all the codebook channels from the [`EncodecModel`], Bark uses it to decode the output audio\n array.\n\n It should be noted that each of the first three modules can support conditional speaker embeddings to condition the\n output sound according to specific predefined voice.\n ", BARK_START_DOCSTRING) class BarkModel(BarkPreTrainedModel): config_class = BarkConfig def __init__(self, config): super().__init__(config) self.semantic = BarkSemanticModel(config.semantic_config) self.coarse_acoustics = BarkCoarseModel(config.coarse_acoustics_config) self.fine_acoustics = BarkFineModel(config.fine_acoustics_config) self.codec_model = AutoModel.from_config(config.codec_config) self.config = config @property def device(self) -> torch.device: if not hasattr(self.semantic, '_hf_hook'): return get_parameter_device(self) for module in self.semantic.modules(): if hasattr(module, '_hf_hook') and hasattr(module._hf_hook, 'execution_device') and (module._hf_hook.execution_device is not None): return torch.device(module._hf_hook.execution_device) def enable_cpu_offload(self, gpu_id: Optional[int]=0): if is_accelerate_available(): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate`.') device = torch.device(f'cuda:{gpu_id}') if self.device.type != 'cpu': self.to('cpu') torch.cuda.empty_cache() (self.semantic.input_embeds_layer, _) = cpu_offload_with_hook(self.semantic.input_embeds_layer, device) hook = None for cpu_offloaded_model in [self.semantic, self.coarse_acoustics, self.fine_acoustics]: (_, hook) = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) self.fine_acoustics_hook = hook (_, hook) = cpu_offload_with_hook(self.codec_model, device, prev_module_hook=hook) self.codec_model_hook = hook def codec_decode(self, fine_output, output_lengths=None): fine_output = fine_output.transpose(0, 1) emb = self.codec_model.quantizer.decode(fine_output) if output_lengths is not None: out = [sample[:, :l].unsqueeze(0) for (sample, l) in zip(emb, output_lengths)] audio_arr = [self.codec_model.decoder(sample).squeeze() for sample in out] else: out = self.codec_model.decoder(emb) audio_arr = out.squeeze(1) return audio_arr @torch.no_grad() def generate(self, input_ids: Optional[torch.Tensor]=None, history_prompt: Optional[Dict[str, torch.Tensor]]=None, return_output_lengths: Optional[bool]=None, **kwargs) -> torch.LongTensor: semantic_generation_config = BarkSemanticGenerationConfig(**self.generation_config.semantic_config) coarse_generation_config = BarkCoarseGenerationConfig(**self.generation_config.coarse_acoustics_config) fine_generation_config = BarkFineGenerationConfig(**self.generation_config.fine_acoustics_config) kwargs_semantic = {'attention_mask': kwargs.pop('attention_mask', None), 'min_eos_p': kwargs.pop('min_eos_p', None)} kwargs_coarse = {} kwargs_fine = {} for (key, value) in kwargs.items(): if key.startswith('semantic_'): key = key[len('semantic_'):] kwargs_semantic[key] = value elif key.startswith('coarse_'): key = key[len('coarse_'):] kwargs_coarse[key] = value elif key.startswith('fine_'): key = key[len('fine_'):] kwargs_fine[key] = value else: if key not in kwargs_semantic: kwargs_semantic[key] = value if key not 
in kwargs_coarse: kwargs_coarse[key] = value if key not in kwargs_fine: kwargs_fine[key] = value semantic_output = self.semantic.generate(input_ids, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, **kwargs_semantic) coarse_output = self.coarse_acoustics.generate(semantic_output, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, codebook_size=self.generation_config.codebook_size, return_output_lengths=return_output_lengths, **kwargs_coarse) output_lengths = None if return_output_lengths: (coarse_output, output_lengths) = coarse_output output_lengths = output_lengths // coarse_generation_config.n_coarse_codebooks output = self.fine_acoustics.generate(coarse_output, history_prompt=history_prompt, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, fine_generation_config=fine_generation_config, codebook_size=self.generation_config.codebook_size, **kwargs_fine) if getattr(self, 'fine_acoustics_hook', None) is not None: self.fine_acoustics_hook.offload() self.codec_model = self.codec_model.to(self.device) audio = self.codec_decode(output, output_lengths) if getattr(self, 'codec_model_hook', None) is not None: self.codec_model_hook.offload() if return_output_lengths: output_lengths = [len(sample) for sample in audio] audio = nn.utils.rnn.pad_sequence(audio, batch_first=True, padding_value=0) return (audio, output_lengths) return audio @classmethod def _check_and_enable_flash_attn_2(cls, config, torch_dtype: Optional[torch.dtype]=None, device_map: Optional[Union[str, Dict[str, int]]]=None, hard_check_only: bool=False, check_device_map: bool=False): config = super()._check_and_enable_flash_attn_2(config, torch_dtype, device_map, hard_check_only=hard_check_only, check_device_map=check_device_map) config.semantic_config._attn_implementation = config._attn_implementation config.coarse_acoustics_config._attn_implementation = config._attn_implementation config.fine_acoustics_config._attn_implementation = config._attn_implementation return config # File: transformers-main/src/transformers/models/bark/processing_bark.py """""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer logger = logging.get_logger(__name__) class BarkProcessor(ProcessorMixin): tokenizer_class = 'AutoTokenizer' attributes = ['tokenizer'] preset_shape = {'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2} def __init__(self, tokenizer, speaker_embeddings=None): super().__init__(tokenizer) self.speaker_embeddings = speaker_embeddings @classmethod def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path='speaker_embeddings_path.json', **kwargs): if speaker_embeddings_dict_path is not None: speaker_embeddings_path = get_file_from_repo(pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None), force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), resume_download=kwargs.pop('resume_download', None), local_files_only=kwargs.pop('local_files_only', False), token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None)) if speaker_embeddings_path is None: 
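# If the speaker-embeddings JSON cannot be resolved, the processor falls back to tokenizer-only behaviour with no preloaded voice presets.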
logger.warning(f'`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.') speaker_embeddings = None else: with open(speaker_embeddings_path) as speaker_embeddings_json: speaker_embeddings = json.load(speaker_embeddings_json) else: speaker_embeddings = None tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs) return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings) def save_pretrained(self, save_directory, speaker_embeddings_dict_path='speaker_embeddings_path.json', speaker_embeddings_directory='speaker_embeddings', push_to_hub: bool=False, **kwargs): if self.speaker_embeddings is not None: os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, 'v2'), exist_ok=True) embeddings_dict = {} embeddings_dict['repo_or_path'] = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != 'repo_or_path': voice_preset = self._load_voice_preset(prompt_key) tmp_dict = {} for key in self.speaker_embeddings[prompt_key]: np.save(os.path.join(embeddings_dict['repo_or_path'], speaker_embeddings_directory, f'{prompt_key}_{key}'), voice_preset[key], allow_pickle=False) tmp_dict[key] = os.path.join(speaker_embeddings_directory, f'{prompt_key}_{key}.npy') embeddings_dict[prompt_key] = tmp_dict with open(os.path.join(save_directory, speaker_embeddings_dict_path), 'w') as fp: json.dump(embeddings_dict, fp) super().save_pretrained(save_directory, push_to_hub, **kwargs) def _load_voice_preset(self, voice_preset: str=None, **kwargs): voice_preset_paths = self.speaker_embeddings[voice_preset] voice_preset_dict = {} for key in ['semantic_prompt', 'coarse_prompt', 'fine_prompt']: if key not in voice_preset_paths: raise ValueError(f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].') path = get_file_from_repo(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key], subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None), force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), resume_download=kwargs.pop('resume_download', None), local_files_only=kwargs.pop('local_files_only', False), token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None)) if path is None: raise ValueError(f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.") voice_preset_dict[key] = np.load(path) return voice_preset_dict def _validate_voice_preset_dict(self, voice_preset: Optional[dict]=None): for key in ['semantic_prompt', 'coarse_prompt', 'fine_prompt']: if key not in voice_preset: raise ValueError(f'Voice preset unrecognized, missing {key} as a key.') if not isinstance(voice_preset[key], np.ndarray): raise TypeError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.') if len(voice_preset[key].shape) != self.preset_shape[key]: raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.') def __call__(self, text=None, voice_preset=None, return_tensors='pt', max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs): if voice_preset is not None and (not 
isinstance(voice_preset, dict)): if isinstance(voice_preset, str) and self.speaker_embeddings is not None and (voice_preset in self.speaker_embeddings): voice_preset = self._load_voice_preset(voice_preset) else: if isinstance(voice_preset, str) and (not voice_preset.endswith('.npz')): voice_preset = voice_preset + '.npz' voice_preset = np.load(voice_preset) if voice_preset is not None: self._validate_voice_preset_dict(voice_preset, **kwargs) voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors) encoded_text = self.tokenizer(text, return_tensors=return_tensors, padding='max_length', max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs) if voice_preset is not None: encoded_text['history_prompt'] = voice_preset return encoded_text # File: transformers-main/src/transformers/models/bart/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_bart': ['BartConfig', 'BartOnnxConfig'], 'tokenization_bart': ['BartTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_bart_fast'] = ['BartTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_bart'] = ['BartForCausalLM', 'BartForConditionalGeneration', 'BartForQuestionAnswering', 'BartForSequenceClassification', 'BartModel', 'BartPreTrainedModel', 'BartPretrainedModel', 'PretrainedBartModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_bart'] = ['TFBartForConditionalGeneration', 'TFBartForSequenceClassification', 'TFBartModel', 'TFBartPretrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_bart'] = ['FlaxBartDecoderPreTrainedModel', 'FlaxBartForCausalLM', 'FlaxBartForConditionalGeneration', 'FlaxBartForQuestionAnswering', 'FlaxBartForSequenceClassification', 'FlaxBartModel', 'FlaxBartPreTrainedModel'] if TYPE_CHECKING: from .configuration_bart import BartConfig, BartOnnxConfig from .tokenization_bart import BartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bart_fast import BartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bart import BartForCausalLM, BartForConditionalGeneration, BartForQuestionAnswering, BartForSequenceClassification, BartModel, BartPreTrainedModel, BartPretrainedModel, PretrainedBartModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bart import TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBartModel, TFBartPretrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bart import FlaxBartDecoderPreTrainedModel, FlaxBartForCausalLM, FlaxBartForConditionalGeneration, 
FlaxBartForQuestionAnswering, FlaxBartForSequenceClassification, FlaxBartModel, FlaxBartPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/bart/configuration_bart.py """""" import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging logger = logging.get_logger(__name__) class BartConfig(PretrainedConfig): model_type = 'bart' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding super().__init__(num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False): self.forced_bos_token_id = self.bos_token_id warnings.warn(f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. 
The config can simply be saved and uploaded again to be fixed.') class BartOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ['default', 'seq2seq-lm']: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: common_inputs['decoder_input_ids'] = {0: 'batch'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') elif self.task == 'causal-lm': common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: (num_encoder_layers, _) = self.num_layers for i in range(num_encoder_layers): common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'} common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'} else: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})]) return common_inputs @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ['default', 'seq2seq-lm']: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: (num_encoder_layers, _) = self.num_layers for i in range(num_encoder_layers): common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'} common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework) decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair, framework) decoder_inputs = {f'decoder_{name}': tensor for (name, tensor) in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, encoder_seq_length) = common_inputs['input_ids'].shape decoder_seq_length = common_inputs['decoder_input_ids'].shape[1] (num_encoder_attention_heads, num_decoder_attention_heads) = self.num_attention_heads encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads) decoder_past_length = decoder_seq_length + 3 decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads) common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1) 
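# The statements that follow pre-fill `past_key_values` for ONNX export: for the seq2seq tasks each
# layer receives a 4-tuple (decoder self-attention key/value built from `decoder_shape`, then
# cross-attention key/value built from `encoder_shape`); when encoder and decoder depths differ, the
# remaining layers of the deeper stack only get a 2-tuple of the matching shape. As a rough
# illustration, assuming bart-large defaults (d_model=1024, 16 heads), each decoder entry has shape
# (batch, 16, decoder_seq_length + 3, 64) and each cross-attention entry (batch, 16, encoder_seq_length, 64).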
common_inputs['past_key_values'] = [] (num_encoder_layers, num_decoder_layers) = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(min_num_layers): common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape))) shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(min_num_layers, max_num_layers): common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, seqlen) = common_inputs['input_ids'].shape past_key_values_length = seqlen + 2 (num_encoder_layers, _) = self.num_layers (num_encoder_attention_heads, _) = self.num_attention_heads past_shape = (batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads) mask_dtype = common_inputs['attention_mask'].dtype common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1) common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)] return common_inputs def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0) token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add) dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) return common_inputs def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: if self.task in ['default', 'seq2seq-lm']: common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) elif self.task == 'causal-lm': common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) else: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) return common_inputs def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ['default', 'seq2seq-lm']: 
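# Seq2seq tasks reuse the flattening logic of `OnnxSeq2SeqConfigWithPast` (four tensors per layer:
# decoder and encoder key/value pairs); any other task falls back to the decoder-only flattening
# inherited from `OnnxConfigWithPast` (two tensors per layer).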
flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t) # File: transformers-main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py """""" import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer from transformers.utils import logging FAIRSEQ_MODELS = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt'] extra_arch = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('0.9.0'): raise Exception('requires fairseq >= 0.9.0') logging.set_verbosity_info() logger = logging.get_logger(__name__) SAMPLE_TEXT = ' Hello world! cécé herlolip' mnli_rename_keys = [('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'), ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'), ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'), ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias')] def remove_ignore_keys_(state_dict): ignore_keys = ['encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor'] for k in ignore_keys: state_dict.pop(k, None) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def load_xsum_checkpoint(checkpoint_path): sd = torch.load(checkpoint_path, map_location='cpu') hub_interface = torch.hub.load('pytorch/fairseq', 'bart.large.cnn').eval() hub_interface.model.load_state_dict(sd['model']) return hub_interface def make_linear_from_emb(emb): (vocab_size, emb_size) = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer @torch.no_grad() def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None): if not os.path.exists(checkpoint_path): bart = torch.hub.load('pytorch/fairseq', checkpoint_path).eval() else: bart = load_xsum_checkpoint(checkpoint_path) bart.model.upgrade_state_dict(bart.model.state_dict()) if hf_checkpoint_name is None: hf_checkpoint_name = checkpoint_path.replace('.', '-') config = BartConfig.from_pretrained(hf_checkpoint_name) tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0) tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors='pt').unsqueeze(0) if not torch.eq(tokens, tokens2).all(): raise ValueError(f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}') if checkpoint_path == 'bart.large.mnli': state_dict = bart.state_dict() remove_ignore_keys_(state_dict) state_dict['model.shared.weight'] = state_dict['model.decoder.embed_tokens.weight'] for (src, dest) in mnli_rename_keys: rename_key(state_dict, src, dest) model = BartForSequenceClassification(config).eval() model.load_state_dict(state_dict) fairseq_output = bart.predict('mnli', tokens, return_logits=True) new_model_outputs = model(tokens)[0] else: state_dict = bart.model.state_dict() remove_ignore_keys_(state_dict) state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight'] fairseq_output = bart.extract_features(tokens) if hf_checkpoint_name == 'facebook/bart-large': 
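# For the plain `facebook/bart-large` config the weights go into a bare BartModel; otherwise they go
# into BartForConditionalGeneration, whose lm_head is re-tied to the shared embeddings via
# make_linear_from_emb before the fairseq and Transformers outputs are compared below.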
model = BartModel(config).eval() model.load_state_dict(state_dict) new_model_outputs = model(tokens).model[0] else: model = BartForConditionalGeneration(config).eval() model.model.load_state_dict(state_dict) if hasattr(model, 'lm_head'): model.lm_head = make_linear_from_emb(model.model.shared) new_model_outputs = model.model(tokens)[0] if fairseq_output.shape != new_model_outputs.shape: raise ValueError(f'`fairseq_output` shape and `new_model_output` shape are different: fairseq_output.shape={fairseq_output.shape!r}, {new_model_outputs.shape}') if (fairseq_output != new_model_outputs).any().item(): raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`') Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum') args = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config) # File: transformers-main/src/transformers/models/bart/modeling_bart.py """""" import copy import math import warnings from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings from .configuration_bart import BartConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'facebook/bart-base' _CONFIG_FOR_DOC = 'BartConfig' _EXPECTED_OUTPUT_SHAPE = [1, 8, 768] _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = 'valhalla/bart-large-sst2' _SEQ_CLASS_EXPECTED_LOSS = 0.0 _SEQ_CLASS_EXPECTED_OUTPUT = "'POSITIVE'" _CHECKPOINT_FOR_QA = 'valhalla/bart-large-finetuned-squadv1' _QA_EXPECTED_LOSS = 0.59 _QA_EXPECTED_OUTPUT = "' nice puppet'" def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError('self.model.config.pad_token_id has to be defined.') shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class BartLearnedPositionalEmbedding(nn.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int): self.offset = 2 super().__init__(num_embeddings + 
self.offset, embedding_dim) def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0): (bsz, seq_len) = input_ids.shape[:2] positions = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device).expand(bsz, -1) return super().forward(positions + self.offset) class BartScaledWordEmbedding(nn.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale class BartAttention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[BartConfig]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not 
None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) class BartFlashAttention2(BartAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: raise ValueError('BartFlashAttention2 attention does not support output_attentions') is_cross_attention = key_value_states is not None (bsz, q_len, _) = hidden_states.size() query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0].transpose(1, 2) value_states = past_key_value[1].transpose(1, 2) elif is_cross_attention: key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) else: key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) kv_seq_len = key_states.shape[-2] if past_key_value is not None: 
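# past_key_value stores (key, value) tensors of shape (batch, num_heads, cached_len, head_dim), so the
# cached length is added to obtain the total key/value sequence length.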
kv_seq_len += past_key_value[0].shape[-2] input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class BartSdpaAttention(BartAttention): def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions or layer_head_mask is not None: logger.warning_once('BartModel is using BartSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states, key_value_states=key_value_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) query_states = self._shape(query_states, tgt_len, bsz) is_causal = True if self.is_causal and attention_mask is None and (tgt_len > 1) else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.dropout if self.training else 0.0, is_causal=is_causal) if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, None, past_key_value) BART_ATTENTION_CLASSES = {'eager': BartAttention, 'sdpa': BartSdpaAttention, 'flash_attention_2': BartFlashAttention2} class BartEncoderLayer(nn.Module): def __init__(self, config: BartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BART_ATTENTION_CLASSES[config._attn_implementation](embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: residual = hidden_states (hidden_states, attn_weights, _) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = 
nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class BartDecoderLayer(nn.Module): def __init__(self, config: BartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BART_ATTENTION_CLASSES[config._attn_implementation](embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = BART_ATTENTION_CLASSES[config._attn_implementation](self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None (hidden_states, cross_attn_weights, cross_attn_present_key_value) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) 
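# Remainder of the decoder layer's feed-forward block: activation dropout, projection back to d_model
# through fc2, dropout, residual connection, then the final LayerNorm (BART uses post-LayerNorm).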
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class BartClassificationHead(nn.Module): def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class BartPreTrainedModel(PreTrainedModel): config_class = BartConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _keys_to_ignore_on_load_unexpected = ['encoder.version', 'decoder.version'] _no_split_modules = ['BartEncoderLayer', 'BartDecoderLayer'] _skip_keys_device_placement = 'past_key_values' _supports_flash_attn_2 = True _supports_sdpa = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = {'attention_mask': input_ids.ne(pad_token), 'input_ids': input_ids} return dummy_inputs class PretrainedBartModel(BartPreTrainedModel): def __init_subclass__(self): warnings.warn('The class `PretrainedBartModel` has been deprecated, please use `BartPreTrainedModel` instead.', FutureWarning) class BartPretrainedModel(BartPreTrainedModel): def __init_subclass__(self): warnings.warn('The class `BartPretrainedModel` has been deprecated, please use `BartPreTrainedModel` instead.', FutureWarning) BART_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage\n and behavior.\n\n Parameters:\n config ([`BartConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration.
Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BART_GENERATION_EXAMPLE = '\n Summarization example:\n\n ```python\n >>> from transformers import AutoTokenizer, BartForConditionalGeneration\n\n >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")\n >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")\n\n >>> ARTICLE_TO_SUMMARIZE = (\n ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "\n ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "\n ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."\n ... )\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt")\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs["input_ids"], num_beams=2, min_length=0, max_length=20)\n >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \'PG&E scheduled the blackouts in response to forecasts for high winds amid dry conditions\'\n ```\n\n Mask filling example:\n\n ```python\n >>> from transformers import AutoTokenizer, BartForConditionalGeneration\n\n >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")\n >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-base")\n\n >>> TXT = "My friends are but they eat too many carbs."\n >>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]\n >>> logits = model(input_ids).logits\n\n >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()\n >>> probs = logits[0, masked_index].softmax(dim=0)\n >>> values, predictions = probs.topk(5)\n\n >>> tokenizer.decode(predictions).split()\n [\'not\', \'good\', \'healthy\', \'great\', \'very\']\n ```\n' BART_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`\n is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).\n\n For translation and summarization training, `decoder_input_ids` should be provided. 
If no\n `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right\n for denoising pre-training following the paper.\n decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,\n 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded\n representation. 
If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be\n input (see `past_key_values`). This is useful if you want more control over how to convert\n `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value\n of `inputs_embeds`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" class BartEncoder(BartPreTrainedModel): def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = BartScaledWordEmbedding(config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = BartLearnedPositionalEmbedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)]) self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' self._use_sdpa = config._attn_implementation == 'sdpa' self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input = input_ids input_ids = input_ids.view(-1, input_ids.shape[-1]) elif inputs_embeds is not None: input = inputs_embeds[:, :, -1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(input) embed_pos = embed_pos.to(inputs_embeds.device) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, 
p=self.dropout, training=self.training) if attention_mask is not None: if self._use_flash_attention_2: attention_mask = attention_mask if 0 in attention_mask else None elif self._use_sdpa and head_mask is None and (not output_attentions): attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) else: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class BartDecoder(BartPreTrainedModel): def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = BartScaledWordEmbedding(config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = BartLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model) self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)]) self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' self._use_sdpa = config._attn_implementation == 'sdpa' self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, 
BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input = input_ids input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input) if self._use_flash_attention_2: attention_mask = attention_mask if attention_mask is not None and 0 in attention_mask else None elif self._use_sdpa and (not output_attentions) and (cross_attn_head_mask is None): attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, inputs_embeds, past_key_values_length) else: attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) if encoder_hidden_states is not None and encoder_attention_mask is not None: if self._use_flash_attention_2: encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None elif self._use_sdpa and cross_attn_head_mask is None and (not output_attentions): encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) else: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) positions = self.embed_positions(input, past_key_values_length) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None next_decoder_cache = () if use_cache else None for (attn_mask, mask_name) in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache) else: layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) @add_start_docstrings('The bare BART Model outputting raw hidden-states without any specific head on top.', BART_START_DOCSTRING) class BartModel(BartPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: BartConfig): super().__init__(config) (padding_idx, vocab_size) = (config.pad_token_id, config.vocab_size) embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.shared = BartScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale) self.encoder = BartEncoder(config, self.shared) self.decoder = BartDecoder(config, self.shared) self.post_init() def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def 
get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Seq2SeqModelOutput]: if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError('If no `decoder_input_ids` or `decoder_inputs_embeds` are passed, `input_ids` cannot be `None`. Please pass either `input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`.') decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id, self.config.decoder_start_token_id) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) @add_start_docstrings('The BART Model with a language modeling head. 
Can be used for summarization.', BART_START_DOCSTRING) class BartForConditionalGeneration(BartPreTrainedModel): base_model_prefix = 'model' _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight'] _keys_to_ignore_on_load_missing = ['final_logits_bias'] def __init__(self, config: BartConfig): super().__init__(config) self.model = BartModel(config) self.register_buffer('final_logits_bias', torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer('final_logits_bias', new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BART_GENERATION_EXAMPLE) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Seq2SeqLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.') use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) lm_logits = self.lm_head(outputs[0]) lm_logits = lm_logits + 
self.final_logits_bias.to(lm_logits.device) masked_lm_loss = None if labels is not None: labels = labels.to(lm_logits.device) loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] return {'input_ids': None, 'encoder_outputs': encoder_outputs, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])) + layer_past[2:],) return reordered_past @add_start_docstrings('\n Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. 
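# A minimal generation sketch for `BartForConditionalGeneration` above (assumes the
# public "facebook/bart-large-cnn" summarization checkpoint is available). Note that
# `final_logits_bias` is added to the LM logits at every step and that
# `resize_token_embeddings` keeps its length in sync with the vocabulary.
from transformers import AutoTokenizer, BartForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
article = "My friends are cool but they eat too many carbs."
batch = tokenizer(article, return_tensors="pt")
summary_ids = model.generate(**batch, num_beams=4, max_length=20)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))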
for GLUE\n tasks.\n ', BART_START_DOCSTRING) class BartForSequenceClassification(BartPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: BartConfig, **kwargs): super().__init__(config, **kwargs) self.model = BartModel(config) self.classification_head = BartClassificationHead(config.d_model, config.d_model, config.num_labels, config.classifier_dropout) self.post_init() @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError(f'Passing input embeddings is currently not supported for {self.__class__.__name__}') outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError('All examples must have the same number of <eos> tokens.') sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = 'regression' elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not
return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Seq2SeqSequenceClassifierOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) @add_start_docstrings('\n BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', BART_START_DOCSTRING) class BartForQuestionAnswering(BartPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = BartModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_QA, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, expected_loss=_QA_EXPECTED_LOSS, expected_output=_QA_EXPECTED_OUTPUT) def forward(self, input_ids: torch.Tensor=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Seq2SeqQuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = 
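# Sketch of sequence classification with the class above: the decoder hidden state at
# each sequence's final <eos> token is used as the pooled sentence representation.
# Assumes a BART checkpoint fine-tuned for classification, e.g. "valhalla/bart-large-sst2",
# is available; any `BartForSequenceClassification` checkpoint works the same way.
from transformers import AutoTokenizer, BartForSequenceClassification
import torch

tokenizer = AutoTokenizer.from_pretrained("valhalla/bart-large-sst2")
model = BartForSequenceClassification.from_pretrained("valhalla/bart-large-sst2")
inputs = tokenizer("This movie was great!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])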
loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return (total_loss,) + output if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) class BartDecoderWrapper(BartPreTrainedModel): def __init__(self, config): super().__init__(config) self.decoder = BartDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) @add_start_docstrings('\n BART decoder with a language modeling head on top (linear layer with weights tied to the input embeddings).\n ', BART_START_DOCSTRING) class BartForCausalLM(BartPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = BartDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = self.lm_head(outputs[0]) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not 
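# Sketch of extractive QA with the class above: `qa_outputs` yields two scores per
# token that are split into start/end logits; target positions outside the sequence
# are clamped to `ignored_index` and excluded from the loss. Assumes a SQuAD-style
# fine-tuned checkpoint such as "valhalla/bart-large-finetuned-squadv1" is available.
from transformers import AutoTokenizer, BartForQuestionAnswering
import torch

tokenizer = AutoTokenizer.from_pretrained("valhalla/bart-large-finetuned-squadv1")
model = BartForQuestionAnswering.from_pretrained("valhalla/bart-large-finetuned-squadv1")
question = "Where does Sylvain work?"
context = "My name is Sylvain and I work at Hugging Face in Brooklyn."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    out = model(**inputs)
start = out.start_logits.argmax(-1).item()
end = out.end_logits.argmax(-1).item()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))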
return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs): if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/bart/modeling_flax_bart.py """""" import math import random from functools import partial from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from jax.random import PRNGKey from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput, FlaxSeq2SeqQuestionAnsweringModelOutput, FlaxSeq2SeqSequenceClassifierOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_bart import BartConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'facebook/bart-base' _CONFIG_FOR_DOC = 'BartConfig' BART_START_DOCSTRING = '\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a Flax Linen\n [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. 
Use it as a\n regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`BartConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' BART_INPUTS_DOCSTRING = '\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n For translation and summarization training, `decoder_input_ids` should be provided. If no\n `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right\n for denoising pre-training following the paper.\n decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the\n paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' BART_ENCODE_INPUTS_DOCSTRING = '\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' BART_DECODE_INPUTS_DOCSTRING = '\n Args:\n decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n For translation and summarization training, `decoder_input_ids` should be provided. If no\n `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right\n for denoising pre-training following the paper.\n encoder_outputs (`tuple(tuple(jnp.ndarray)`):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the\n paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.\n decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):\n Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast\n auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: shifted_input_ids = jnp.zeros_like(input_ids) shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids class FlaxBartAttention(nn.Module): config: BartConfig embed_dim: int num_heads: int dropout: float = 0.0 causal: bool = False bias: bool = True dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') dense = partial(nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) (self.q_proj, self.k_proj, self.v_proj) = (dense(), dense(), dense()) self.out_proj = dense() self.dropout_layer = nn.Dropout(rate=self.dropout) if self.causal: self.causal_mask = make_causal_mask(jnp.ones((1, self.config.max_position_embeddings), dtype='bool'), dtype='bool') def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') cached_key = self.variable('cache', 'cached_key', jnp.zeros, key.shape, key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape, value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, 
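# Descriptive note on the autoregressive cache used just below: `cached_key` and
# `cached_value` are persistent buffers of shape (*batch, max_length, num_heads,
# head_dim), and `cache_index` counts how many positions have been written. On each
# decoding step the new key/value slices are written in place with
# `lax.dynamic_update_slice` at the current index, the index is advanced by the number
# of query positions, and the attention mask is combined with a padding mask so that
# queries only attend to the already-filled prefix of the cache.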
dtype=jnp.int32)) if is_initialized: (*batch_dims, max_length, num_heads, depth_per_head) = cached_key.value.shape cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors pad_mask = jnp.broadcast_to(jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) def __call__(self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray]=None, attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True) -> Tuple[jnp.ndarray]: is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] query_states = self.q_proj(hidden_states) if is_cross_attention: key_states = self.k_proj(key_value_states) value_states = self.v_proj(key_value_states) else: key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) if self.causal: (query_length, key_length) = (query_states.shape[1], key_states.shape[1]) if self.has_variable('cache', 'cached_key'): mask_shift = self.variables['cache']['cache_index'] max_decoder_length = self.variables['cache']['cached_key'].shape[1] causal_mask = lax.dynamic_slice(self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) if self.causal and (self.has_variable('cache', 'cached_key') or init_cache): (key_states, value_states, attention_mask) = self._concatenate_to_cache(key_states, value_states, query_states, attention_mask) if attention_mask is not None: attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng('dropout') attn_weights = dot_product_attention_weights(query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights) class FlaxBartEncoderLayer(nn.Module): config: BartConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxBartAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.encoder_attention_heads, 
dropout=self.config.attention_dropout, dtype=self.dtype) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.fc1 = nn.Dense(self.config.encoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.fc2 = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, output_attentions: bool=True, deterministic: bool=True) -> Tuple[jnp.ndarray]: residual = hidden_states (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class FlaxBartEncoderLayerCollection(nn.Module): config: BartConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxBartEncoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.encoder_layers)] self.layerdrop = self.config.encoder_layerdrop def __call__(self, hidden_states, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for encoder_layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = random.uniform(0, 1) if not deterministic and dropout_probability < self.layerdrop: layer_outputs = (None, None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions, deterministic) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) class FlaxBartDecoderLayer(nn.Module): config: BartConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxBartAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, causal=True, dtype=self.dtype) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) 
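# Note on the decoder layer assembled in this `setup`: at call time it runs causal
# self-attention (reusing the cache when `init_cache` or cached variables are present),
# then cross-attention over `encoder_hidden_states` when they are provided, then the
# feed-forward block; each sub-block is followed by dropout, a residual connection,
# and a post-LayerNorm.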
self.encoder_attn = FlaxBartAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype) self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.fc1 = nn.Dense(self.config.decoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.fc2 = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, output_attentions: bool=True, deterministic: bool=True) -> Tuple[jnp.ndarray]: residual = hidden_states (hidden_states, self_attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states (hidden_states, cross_attn_weights) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs class FlaxBartDecoderLayerCollection(nn.Module): config: BartConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxBartDecoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.decoder_layers)] self.layerdrop = self.config.decoder_layerdrop def __call__(self, hidden_states, attention_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if not deterministic and dropout_probability < self.layerdrop: layer_outputs = (None, None, None) else: layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, deterministic=deterministic) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: 
all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions] if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) class FlaxBartClassificationHead(nn.Module): config: BartConfig inner_dim: int num_classes: int pooler_dropout: float dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.inner_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.dropout = nn.Dropout(rate=self.pooler_dropout) self.out_proj = nn.Dense(self.num_classes, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) def __call__(self, hidden_states: jnp.ndarray, deterministic: bool): hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.dense(hidden_states) hidden_states = jnp.tanh(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.out_proj(hidden_states) return hidden_states class FlaxBartEncoder(nn.Module): config: BartConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_source_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0 self.offset = 2 self.embed_positions = nn.Embed(self.config.max_position_embeddings + self.offset, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), dtype=self.dtype) self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, input_ids, attention_mask, position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(position_ids + self.offset) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers(hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return outputs return FlaxBaseModelOutput(last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class FlaxBartDecoder(nn.Module): config: BartConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_target_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0 self.offset = 2 self.embed_positions = nn.Embed(self.config.max_position_embeddings + self.offset, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), dtype=self.dtype) self.layers = 
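# Note on the position embeddings configured above: BART uses learned positional
# embeddings with an offset of 2 carried over from fairseq (the first two embedding
# rows are reserved), which is why the tables are created with
# `max_position_embeddings + self.offset` entries and looked up at
# `position_ids + self.offset`.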
FlaxBartDecoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale positions = self.embed_positions(position_ids + self.offset) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return outputs return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) class FlaxBartModule(nn.Module): config: BartConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.shared = nn.Embed(self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.init_std), dtype=self.dtype) self.encoder = FlaxBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared) self.decoder = FlaxBartDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared) def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def __call__(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) class FlaxBartPreTrainedModel(FlaxPreTrainedModel): config_class = BartConfig base_model_prefix: str = 'model' module_class: nn.Module = None def __init__(self, config: BartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, 
seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') input_ids = input_ids.at[..., -1].set(self.config.eos_token_id) attention_mask = jnp.ones_like(input_ids) decoder_input_ids = input_ids decoder_attention_mask = jnp.ones_like(input_ids) (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length, encoder_outputs): decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4') decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward) return unfreeze(init_variables['cache']) @add_start_docstrings(BART_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BartConfig) def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, position_ids, **kwargs) return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), 
output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward) @add_start_docstrings(BART_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BartConfig) def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: dict=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: (batch_size, sequence_length) = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) (batch_size, sequence_length) = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is not None and return_dict: (outputs, past) = outputs outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past) = outputs outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) def __call__(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, decoder_input_ids: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if 
output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if decoder_input_ids is None: decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) if decoder_position_ids is None: (batch_size, sequence_length) = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {'dropout': dropout_rng} if dropout_rng is not None else {} return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs) @add_start_docstrings('The bare Bart Model transformer outputting raw hidden-states without any specific head on top.', BART_START_DOCSTRING) class FlaxBartModel(FlaxBartPreTrainedModel): config: BartConfig dtype: jnp.dtype = jnp.float32 module_class = FlaxBartModule append_call_sample_docstring(FlaxBartModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) class FlaxBartForConditionalGenerationModule(nn.Module): config: BartConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros def setup(self): self.model = FlaxBartModule(config=self.config, dtype=self.dtype) self.lm_head = nn.Dense(self.model.shared.num_embeddings, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.final_logits_bias = self.param('final_logits_bias', self.bias_init, (1, self.model.shared.num_embeddings)) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.model.variables['params']['shared']['embedding'] lm_logits = self.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype)) if not 
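# Note on the LM head wiring just above: when `config.tie_word_embeddings` is set, the
# output projection reuses the shared token-embedding matrix (applied transposed as the
# Dense kernel), so no separate output weights are learned; `final_logits_bias` is added
# under `jax.lax.stop_gradient`, so it shifts the logits but receives no gradient.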
return_dict: output = (lm_logits,) + outputs[1:] return output return FlaxSeq2SeqLMOutput(logits=lm_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) @add_start_docstrings('The BART Model with a language modeling head. Can be used for summarization.', BART_START_DOCSTRING) class FlaxBartForConditionalGeneration(FlaxBartPreTrainedModel): module_class = FlaxBartForConditionalGenerationModule dtype: jnp.dtype = jnp.float32 @add_start_docstrings(BART_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BartConfig) def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: dict=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: (batch_size, sequence_length) = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) (batch_size, sequence_length) = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() outputs = decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = module.model.variables['params']['shared']['embedding'] lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: lm_logits = module.lm_head(hidden_states) lm_logits += module.final_logits_bias.astype(self.dtype) return (lm_logits, outputs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if 
past_key_values is None: (lm_logits, decoder_outputs) = outputs else: ((lm_logits, decoder_outputs), past) = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions) else: outputs = (lm_logits,) + decoder_outputs[1:] if past_key_values is not None and return_dict: outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs def prepare_inputs_for_generation(self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array]=None, decoder_attention_mask: Optional[jax.Array]=None, encoder_outputs=None, **kwargs): (batch_size, seq_length) = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if decoder_attention_mask is not None: position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'encoder_outputs': encoder_outputs, 'encoder_attention_mask': attention_mask, 'decoder_attention_mask': extended_attention_mask, 'decoder_position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['decoder_position_ids'] = model_kwargs['decoder_position_ids'][:, -1:] + 1 return model_kwargs FLAX_BART_CONDITIONAL_GENERATION_DOCSTRING = '\n Returns:\n\n Summarization example:\n\n ```python\n >>> from transformers import AutoTokenizer, FlaxBartForConditionalGeneration\n\n >>> model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")\n >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")\n\n >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np")\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs["input_ids"]).sequences\n >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))\n ```\n\n Mask filling example:\n\n ```python\n >>> import jax\n >>> from transformers import AutoTokenizer, FlaxBartForConditionalGeneration\n\n >>> model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large")\n >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")\n\n >>> TXT = "My friends are but they eat too many carbs."\n >>> input_ids = tokenizer([TXT], return_tensors="jax")["input_ids"]\n\n >>> logits = model(input_ids).logits\n >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item()\n >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0)\n >>> values, predictions = jax.lax.top_k(probs, k=1)\n\n >>> tokenizer.decode(predictions).split()\n ```\n' overwrite_call_docstring(FlaxBartForConditionalGeneration, BART_INPUTS_DOCSTRING + FLAX_BART_CONDITIONAL_GENERATION_DOCSTRING) append_replace_return_docstrings(FlaxBartForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) class 
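# Illustrative sketch (toy mask, assumed max_length): `prepare_inputs_for_generation` above derives
# decoder position ids from the cumulative sum of the attention mask and copies the mask into a
# fixed-size buffer so the cache can grow up to `max_length`:
import jax.numpy as jnp
from jax import lax
decoder_attention_mask = jnp.array([[1, 1, 0]], dtype='i4')            # batch of 1, toy values
max_length = 6
position_ids = decoder_attention_mask.cumsum(axis=-1) - 1              # -> [[0, 1, 1]]
extended_attention_mask = lax.dynamic_update_slice(
    jnp.ones((1, max_length), dtype='i4'), decoder_attention_mask, (0, 0))   # -> [[1, 1, 0, 1, 1, 1]]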
FlaxBartForSequenceClassificationModule(nn.Module): config: BartConfig dtype: jnp.dtype = jnp.float32 num_labels: Optional[int] = None def setup(self): self.model = FlaxBartModule(config=self.config, dtype=self.dtype) self.classification_head = FlaxBartClassificationHead(config=self.config, inner_dim=self.config.d_model, num_classes=self.num_labels if self.num_labels is not None else self.config.num_labels, pooler_dropout=self.config.classifier_dropout) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) hidden_states = outputs[0] eos_mask = jnp.where(input_ids == self.config.eos_token_id, 1, 0) if not isinstance(eos_mask, jax.interpreters.partial_eval.DynamicJaxprTracer): if len(jnp.unique(eos_mask.sum(1))) > 1: raise ValueError('All examples must have the same number of tokens.') if any(eos_mask.sum(1) == 0): raise ValueError('There are missing tokens in input_ids') eos_mask_noised = eos_mask + jnp.arange(eos_mask.shape[1]) * 1e-06 eos_mask = jnp.where(eos_mask_noised == eos_mask_noised.max(1).reshape(-1, 1), 1, 0) sentence_representation = jnp.einsum('ijk, ij -> ijk', hidden_states, eos_mask).sum(1) logits = self.classification_head(sentence_representation, deterministic=deterministic) if not return_dict: output = (logits,) + outputs[1:] return output return FlaxSeq2SeqSequenceClassifierOutput(logits=logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) @add_start_docstrings('\n Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. 
for GLUE\n tasks.\n ', BART_START_DOCSTRING) class FlaxBartForSequenceClassification(FlaxBartPreTrainedModel): module_class = FlaxBartForSequenceClassificationModule dtype = jnp.float32 append_call_sample_docstring(FlaxBartForSequenceClassification, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqSequenceClassifierOutput, _CONFIG_FOR_DOC) class FlaxBartForQuestionAnsweringModule(nn.Module): config: BartConfig dtype: jnp.dtype = jnp.float32 num_labels = 2 def setup(self): self.model = FlaxBartModule(config=self.config, dtype=self.dtype) self.qa_outputs = nn.Dense(self.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = jnp.split(logits, logits.shape[-1], axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return output return FlaxSeq2SeqQuestionAnsweringModelOutput(start_logits=start_logits, end_logits=end_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) @add_start_docstrings('\n BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', BART_START_DOCSTRING) class FlaxBartForQuestionAnswering(FlaxBartPreTrainedModel): module_class = FlaxBartForQuestionAnsweringModule dtype = jnp.float32 append_call_sample_docstring(FlaxBartForQuestionAnswering, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqQuestionAnsweringModelOutput, _CONFIG_FOR_DOC) class FlaxBartDecoderPreTrainedModel(FlaxPreTrainedModel): config_class = BartConfig base_model_prefix: str = 'model' module_class: nn.Module = None def __init__(self, config: BartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): config.is_decoder = True config.is_encoder_decoder = False module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') attention_mask = jnp.ones_like(input_ids) (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 
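# Illustrative sketch (assumed eos_token_id=2, toy tensors): the sequence-classification module
# above pools the decoder output at the end-of-sequence position by masking with an einsum; its
# checks ensure every example has the same number of eos tokens, keeping only the last one when
# several appear. The question-answering head simply splits a 2-unit projection into start/end
# logits. Minimal version of the eos pooling:
import jax.numpy as jnp
eos_token_id = 2
input_ids = jnp.array([[5, 6, 2], [7, 2, 1]])                    # one eos per example, toy ids
hidden_states = jnp.arange(2 * 3 * 4.0).reshape(2, 3, 4)         # (batch, seq_len, d_model)
eos_mask = jnp.where(input_ids == eos_token_id, 1, 0)
sentence_representation = jnp.einsum('ijk, ij -> ijk', hidden_states, eos_mask).sum(1)
assert sentence_representation.shape == (2, 4)                   # one vector per example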
'dropout': dropout_rng} encoder_hidden_states = jnp.zeros(input_shape + (self.config.d_model,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, encoder_hidden_states, encoder_attention_mask, return_dict=False) return module_init_outputs['params'] def init_cache(self, batch_size, max_length): input_ids = jnp.ones((batch_size, max_length), dtype='i4') attention_mask = jnp.ones_like(input_ids, dtype='i4') position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True) return unfreeze(init_variables['cache']) @add_start_docstrings_to_model_forward(BART_DECODE_INPUTS_DOCSTRING) def __call__(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, past_key_values: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if encoder_hidden_states is not None and encoder_attention_mask is None: (batch_size, sequence_length) = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {'dropout': dropout_rng} if dropout_rng is not None else {} inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False outputs = self.module.apply(inputs, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable) if past_key_values is not None and return_dict: (outputs, past_key_values) = outputs outputs['past_key_values'] = unfreeze(past_key_values['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past_key_values) = outputs outputs = outputs[:1] + (unfreeze(past_key_values['cache']),) + outputs[1:] return outputs class FlaxBartDecoderWrapper(nn.Module): config: BartConfig dtype: jnp.dtype = jnp.float32 def setup(self): embed_dim = self.config.d_model embed_tokens = nn.Embed(self.config.vocab_size, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std), dtype=self.dtype) self.decoder = FlaxBartDecoder(config=self.config, embed_tokens=embed_tokens, dtype=self.dtype) def __call__(self, *args, **kwargs): return self.decoder(*args, **kwargs) class FlaxBartForCausalLMModule(nn.Module): config: BartConfig dtype: jnp.dtype = jnp.float32 
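# Illustrative sketch (toy module, not BART itself): the decoder `__call__` above threads
# `past_key_values` through Flax's mutable 'cache' collection -- the cache is passed in under
# inputs['cache'] and `mutable=['cache']` makes `apply` return the updated cache alongside the
# outputs. The same mechanism in miniature:
import jax
import jax.numpy as jnp
import flax.linen as nn

class ToyCachedCounter(nn.Module):
    @nn.compact
    def __call__(self, x):
        # stores a running length in the 'cache' collection, the way the decoder stores
        # attention key/value states
        cached_len = self.variable('cache', 'cached_len', lambda: jnp.zeros((), jnp.int32))
        cached_len.value = cached_len.value + x.shape[-1]
        return cached_len.value

toy = ToyCachedCounter()
cache = toy.init(jax.random.PRNGKey(0), jnp.ones((1, 4), dtype='i4'))['cache']   # like init_cache
out, mutated = toy.apply({'cache': cache}, jnp.ones((1, 1), dtype='i4'), mutable=['cache'])
new_cache = mutated['cache']                  # handed back to the caller as `past_key_values`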
def setup(self): self.model = FlaxBartDecoderWrapper(config=self.config, dtype=self.dtype) self.lm_head = nn.Dense(self.config.vocab_size, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) def __call__(self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): outputs = self.model(input_ids, attention_mask, position_ids, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.model.variables['params']['decoder']['embed_tokens']['embedding'] lm_logits = self.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) @add_start_docstrings('\n Bart Decoder Model with a language modeling head on top (linear layer with weights tied to the input embeddings)\n e.g for autoregressive tasks.\n ', BART_START_DOCSTRING) class FlaxBartForCausalLM(FlaxBartDecoderPreTrainedModel): module_class = FlaxBartForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array]=None): (batch_size, seq_length) = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'attention_mask': extended_attention_mask, 'position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['position_ids'] = model_kwargs['position_ids'][:, -1:] + 1 return model_kwargs append_call_sample_docstring(FlaxBartForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC) # File: transformers-main/src/transformers/models/bart/modeling_tf_bart.py """""" from __future__ import annotations import random from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPastAndCrossAttentions, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput, TFSeq2SeqSequenceClassifierOutput from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_bart import 
BartConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'facebook/bart-large' _CONFIG_FOR_DOC = 'BartConfig' LARGE_NEGATIVE = -100000000.0 def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) shifted_input_ids = tf.where(shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), shifted_input_ids) assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int=0): bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int]=None): src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFBartLearnedPositionalEmbedding(keras.layers.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs) def call(self, input_shape: Optional[tf.TensorShape]=None, past_key_values_length: int=0, position_ids: tf.Tensor | None=None): if position_ids is None: seq_len = input_shape[1] position_ids = tf.range(seq_len, delta=1, name='range') position_ids += past_key_values_length offset_dtype = position_ids.dtype if isinstance(position_ids, tf.Tensor) else tf.int32 return super().call(position_ids + tf.constant(self.offset, dtype=offset_dtype)) class TFBartAttention(keras.layers.Layer): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, **kwargs): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='k_proj') self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='q_proj') self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='v_proj') self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='out_proj') def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call(self, hidden_states: tf.Tensor, key_value_states: 
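# Illustrative sketch (assumed toy ids: pad_token_id=1, decoder_start_token_id=2):
# `shift_tokens_right` above builds decoder inputs by prepending the decoder start token,
# dropping the last position, and mapping any -100 label back to the pad token:
import tensorflow as tf
pad_token_id, decoder_start_token_id = 1, 2
labels = tf.constant([[0, 8, -100, -100]])
start_tokens = tf.fill([1, 1], tf.constant(decoder_start_token_id, dtype=labels.dtype))
shifted = tf.concat([start_tokens, labels[:, :-1]], axis=-1)                  # [[2, 0, 8, -100]]
shifted = tf.where(shifted == -100,
                   tf.fill(tf.shape(shifted), tf.constant(pad_token_id, dtype=labels.dtype)),
                   shifted)                                                   # [[2, 0, 8, 1]]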
tf.Tensor | None=None, past_key_value: Tuple[Tuple[tf.Tensor]] | None=None, attention_mask: tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor | None]: is_cross_attention = key_value_states is not None (bsz, tgt_len, embed_dim) = shape_list(hidden_states) query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None: key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal(shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}') if attention_mask is not None: tf.debugging.assert_equal(shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}') attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal(shape_list(layer_head_mask), [self.num_heads], message=f'Head mask for a single layer should be of size {self.num_heads}, but is {shape_list(layer_head_mask)}') attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal(shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}') attn_output = tf.transpose(tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return (attn_output, attn_weights, past_key_value) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'k_proj', None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, 
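# Illustrative sketch (toy sizes): the attention layer above reshapes projections to
# (bsz * num_heads, seq_len, head_dim), takes a scaled dot product, softmaxes, and merges the
# heads back. A bare-bones version of that data flow:
import tensorflow as tf
bsz, num_heads, head_dim, tgt_len, src_len = 1, 2, 4, 3, 3
q = tf.random.normal((bsz, tgt_len, num_heads * head_dim)) * head_dim ** -0.5   # same scaling
k = tf.random.normal((bsz, src_len, num_heads * head_dim))
v = tf.random.normal((bsz, src_len, num_heads * head_dim))

def split_heads(t, seq_len):
    # (bsz, seq_len, embed_dim) -> (bsz * num_heads, seq_len, head_dim)
    t = tf.transpose(tf.reshape(t, (bsz, seq_len, num_heads, head_dim)), (0, 2, 1, 3))
    return tf.reshape(t, (bsz * num_heads, seq_len, head_dim))

attn_weights = tf.matmul(split_heads(q, tgt_len), split_heads(k, src_len), transpose_b=True)
attn_probs = tf.nn.softmax(attn_weights, axis=-1)            # the model uses stable_softmax here
attn_output = tf.matmul(attn_probs, split_heads(v, src_len))
attn_output = tf.reshape(                                    # merge heads back to (bsz, tgt_len, embed_dim)
    tf.transpose(tf.reshape(attn_output, (bsz, num_heads, tgt_len, head_dim)), (0, 2, 1, 3)),
    (bsz, tgt_len, num_heads * head_dim))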
self.embed_dim]) if getattr(self, 'q_proj', None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, 'v_proj', None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, 'out_proj', None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFBartEncoderLayer(keras.layers.Layer): def __init__(self, config: BartConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFBartAttention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name='self_attn') self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='self_attn_layer_norm') self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name='fc1') self.fc2 = keras.layers.Dense(self.embed_dim, name='fc2') self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='final_layer_norm') self.config = config def call(self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None, layer_head_mask: tf.Tensor | None, training: Optional[bool]=False) -> tf.Tensor: residual = hidden_states (hidden_states, self_attn_weights, _) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask) tf.debugging.assert_equal(shape_list(hidden_states), shape_list(residual), message=f'Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}') hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) return (hidden_states, self_attn_weights) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attn', None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, 'self_attn_layer_norm', None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, 'fc1', None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, 'fc2', None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.encoder_ffn_dim]) if getattr(self, 'final_layer_norm', None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFBartDecoderLayer(keras.layers.Layer): def __init__(self, config: BartConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFBartAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, name='self_attn', is_decoder=True) self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = 
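# Illustrative sketch (toy dims): the encoder layer above is a post-layer-norm residual block --
# LayerNormalization is applied *after* the residual addition, both around self-attention and
# around the feed-forward sub-block:
import tensorflow as tf
from tensorflow import keras
layer_norm = keras.layers.LayerNormalization(epsilon=1e-5)
sub_block = keras.layers.Dense(8)                 # stand-in for the attention / FFN sub-block
hidden_states = tf.random.normal((1, 3, 8))
residual = hidden_states
hidden_states = sub_block(hidden_states)
hidden_states = layer_norm(residual + hidden_states)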
get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='self_attn_layer_norm') self.encoder_attn = TFBartAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name='encoder_attn', is_decoder=True) self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='encoder_attn_layer_norm') self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name='fc1') self.fc2 = keras.layers.Dense(self.embed_dim, name='fc2') self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='final_layer_norm') self.config = config def call(self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, cross_attn_layer_head_mask: tf.Tensor | None=None, past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: residual = hidden_states self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None (hidden_states, cross_attn_weights, cross_attn_present_key_value) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) return (hidden_states, self_attn_weights, cross_attn_weights, present_key_value) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attn', None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, 'self_attn_layer_norm', None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, 'encoder_attn', None) is not None: with tf.name_scope(self.encoder_attn.name): self.encoder_attn.build(None) if getattr(self, 'encoder_attn_layer_norm', None) is not None: with tf.name_scope(self.encoder_attn_layer_norm.name): self.encoder_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, 'fc1', None) is not 
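# Illustrative sketch: each decoder layer's cache entry packs four tensors, which the layer above
# unpacks for its two attention blocks:
#   (self_attn_key, self_attn_value, cross_attn_key, cross_attn_value)
past_key_value = ('self_k', 'self_v', 'cross_k', 'cross_v')      # placeholders for the tensors
self_attn_past_key_value = past_key_value[:2]                    # consumed by self-attention
cross_attn_past_key_value = past_key_value[-2:]                  # consumed by cross-attention
# the layer then returns present_key_value = self-attn present (2 tensors) + cross-attn present (2 tensors)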
None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, 'fc2', None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.decoder_ffn_dim]) if getattr(self, 'final_layer_norm', None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFBartClassificationHead(keras.layers.Layer): def __init__(self, inner_dim: int, num_classes: int, pooler_dropout: float, name: str, **kwargs): super().__init__(name=name, **kwargs) self.dense = keras.layers.Dense(inner_dim, name='dense') self.dropout = keras.layers.Dropout(pooler_dropout) self.out_proj = keras.layers.Dense(num_classes, name='out_proj') self.input_dim = inner_dim self.inner_dim = inner_dim def call(self, inputs): hidden_states = self.dropout(inputs) hidden_states = self.dense(hidden_states) hidden_states = keras.activations.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.input_dim]) if getattr(self, 'out_proj', None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.inner_dim]) class TFBartPretrainedModel(TFPreTrainedModel): config_class = BartConfig base_model_prefix = 'model' @property def dummy_inputs(self): dummy_inputs = super().dummy_inputs dummy_inputs['input_ids'] = dummy_inputs['input_ids'] * 2 if 'decoder_input_ids' in dummy_inputs: dummy_inputs['decoder_input_ids'] = dummy_inputs['decoder_input_ids'] * 2 return dummy_inputs def tf_to_pt_weight_rename(self, tf_weight): if tf_weight == 'model.shared.weight': return (tf_weight, 'model.decoder.embed_tokens.weight') else: return (tf_weight,) BART_START_DOCSTRING = '\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Args:\n config ([`BartConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' BART_GENERATION_EXAMPLE = '\n Summarization example:\n\n ```python\n >>> from transformers import AutoTokenizer, TFBartForConditionalGeneration\n\n >>> model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large")\n >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")\n\n >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="tf")\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5)\n >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))\n ```\n\n Mask filling example:\n\n ```python\n >>> from transformers import AutoTokenizer, TFBartForConditionalGeneration\n\n >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")\n >>> TXT = "My friends are but they eat too many carbs."\n\n >>> model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large")\n >>> input_ids = tokenizer([TXT], return_tensors="tf")["input_ids"]\n >>> logits = model(input_ids).logits\n >>> probs = tf.nn.softmax(logits[0])\n >>> # probs[5] is associated with the mask token\n ```\n' BART_INPUTS_DOCSTRING = "\n Args:\n input_ids (`tf.Tensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`\n is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).\n\n For translation and summarization training, `decoder_input_ids` should be provided. If no\n `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right\n for denoising pre-training following the paper.\n decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.\n decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tf.FloatTensor`, *optional*):\n hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n of shape `(batch_size, sequence_length, hidden_size)` is a sequence of\n past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`)\n contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*, defaults to `True`):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`). Set to `False` during training, `True` during generation\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @keras_serializable class TFBartEncoder(keras.layers.Layer): config_class = BartConfig '' def __init__(self, config: BartConfig, embed_tokens: Optional[keras.layers.Embedding]=None, **kwargs): super().__init__(**kwargs) self.config = config self.dropout = keras.layers.Dropout(config.dropout) self.layerdrop = config.encoder_layerdrop self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.embed_tokens = embed_tokens self.embed_positions = TFBartLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model, name='embed_positions') self.layers = [TFBartEncoderLayer(config, name=f'layers.{i}') for i in range(config.encoder_layers)] self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-05, name='layernorm_embedding') self.embed_dim = config.d_model @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout(hidden_states, training=training) if attention_mask is not None: attention_mask = _expand_mask(attention_mask) else: attention_mask = None encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: tf.debugging.assert_equal(shape_list(head_mask)[0], len(self.layers), message=f'The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(head_mask)[0]}.') for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) dropout_probability = random.uniform(0, 1) if training and dropout_probability < self.layerdrop: continue (hidden_states, attn) = 
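# Illustrative sketch (toy settings): the encoder loop above applies LayerDrop -- during training
# each layer is skipped entirely with probability `config.encoder_layerdrop`:
import random
layerdrop, training = 0.1, True
hidden_states = 0.0
for layer_idx in range(6):
    if training and random.uniform(0, 1) < layerdrop:
        continue                              # stochastically drop this layer
    hidden_states = hidden_states + 1.0       # stand-in for encoder_layer(hidden_states, ...)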
encoder_layer(hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None) if output_attentions: all_attentions += (attn,) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embed_positions', None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, 'layernorm_embedding', None) is not None: with tf.name_scope(self.layernorm_embedding.name): self.layernorm_embedding.build([None, None, self.embed_dim]) if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFBartDecoder(keras.layers.Layer): config_class = BartConfig '' def __init__(self, config: BartConfig, embed_tokens: Optional[keras.layers.Embedding]=None, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.embed_tokens = embed_tokens self.layerdrop = config.decoder_layerdrop self.embed_positions = TFBartLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model, name='embed_positions') self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.layers = [TFBartDecoderLayer(config, name=f'layers.{i}') for i in range(config.decoder_layers)] self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-05, name='layernorm_embedding') self.dropout = keras.layers.Dropout(config.dropout) @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if position_ids is None: positions = self.embed_positions(input_shape, past_key_values_length) else: positions = self.embed_positions(input_shape, position_ids=position_ids) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) else: 
combined_attention_mask = _expand_mask(tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]) if attention_mask is not None: combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1]) if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) hidden_states = self.layernorm_embedding(hidden_states + positions) hidden_states = self.dropout(hidden_states, training=training) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attns = () if output_attentions and encoder_hidden_states is not None else None present_key_values = () if use_cache else None for (attn_mask_name, attn_mask) in [('head_mask', head_mask), ('cross_attn_head_mask', cross_attn_head_mask)]: if attn_mask is not None: tf.debugging.assert_equal(shape_list(attn_mask)[0], len(self.layers), message=f'The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for {shape_list(attn_mask)[0]}.') for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None (hidden_states, layer_self_attn, layer_cross_attn, present_key_value) = decoder_layer(hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value) if use_cache: present_key_values += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) if encoder_hidden_states is not None: all_cross_attns += (layer_cross_attn,) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return (hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns) else: return TFBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attns) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embed_positions', None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, 'layernorm_embedding', None) is not None: with tf.name_scope(self.layernorm_embedding.name): self.layernorm_embedding.build([None, None, self.config.d_model]) if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFBartMainLayer(keras.layers.Layer): config_class = BartConfig def __init__(self, config: BartConfig, load_weight_prefix=None, **kwargs): super().__init__(**kwargs) self.config = config self.shared = keras.layers.Embedding(input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std), name='model.shared') self.shared.load_weight_prefix = 'model.shared' if load_weight_prefix is None else load_weight_prefix self.encoder = TFBartEncoder(config, self.shared, name='encoder') 
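# Illustrative sketch (toy mask, bsz=1): the decoder call above builds an *additive* attention
# mask -- a causal lower-triangular mask plus the expanded padding mask, both using a large
# negative value instead of -inf -- which is added to the attention scores before softmax:
import tensorflow as tf
LARGE_NEGATIVE = -100000000.0
tgt_len = 3
idx = tf.range(tgt_len)
causal = tf.where(idx[None, :] <= idx[:, None], 0.0, LARGE_NEGATIVE)     # position i sees j <= i
causal = causal[None, None, :, :]                                        # (bsz, 1, tgt_len, tgt_len)
attention_mask = tf.constant([[1.0, 1.0, 0.0]])                          # last position is padding
padding = (1.0 - tf.tile(attention_mask[:, None, None, :], (1, 1, tgt_len, 1))) * LARGE_NEGATIVE
combined_mask = causal + padding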
self.decoder = TFBartDecoder(config, self.shared, name='decoder') def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False, **kwargs) -> Union[TFSeq2SeqModelOutput, Tuple[tf.Tensor]]: if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError('If no `decoder_input_ids` or `decoder_inputs_embeds` are passed, `input_ids` cannot be `None`. Please pass either `input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`.') decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id, self.config.decoder_start_token_id) if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) elif return_dict and (not isinstance(encoder_outputs, TFBaseModelOutput)): encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) elif not return_dict and (not isinstance(encoder_outputs, tuple)): encoder_outputs = encoder_outputs.to_tuple() decoder_outputs = self.decoder(decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) if not return_dict: return decoder_outputs + encoder_outputs return TFSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True with tf.name_scope(self.shared.load_weight_prefix + '/' + self.shared.name + '/'): self.shared.build(None) if getattr(self, 'encoder', None) is not None: with 
tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'decoder', None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings('The bare BART Model outputting raw hidden-states without any specific head on top.', BART_START_DOCSTRING) class TFBartModel(TFBartPretrainedModel): _requires_load_weight_prefix = True def __init__(self, config: BartConfig, load_weight_prefix=None, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name='model') def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False, **kwargs) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqModelOutput(last_hidden_state=output.last_hidden_state, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'model', None) is not None: with 
tf.name_scope(self.model.name): self.model.build(None) class BiasLayer(keras.layers.Layer): def __init__(self, shape, initializer, trainable, name, **kwargs): super().__init__(name=name, **kwargs) self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) def call(self, x): return x + self.bias @add_start_docstrings('The BART Model with a language modeling head. Can be used for summarization.', BART_START_DOCSTRING) class TFBartForConditionalGeneration(TFBartPretrainedModel, TFCausalLanguageModelingLoss): _keys_to_ignore_on_load_missing = ['final_logits_bias'] _requires_load_weight_prefix = True def __init__(self, config, load_weight_prefix=None, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name='model') self.use_cache = config.use_cache self.bias_layer = BiasLayer(name='final_logits_bias', shape=[1, config.vocab_size], initializer='zeros', trainable=False) def get_decoder(self): return self.model.decoder def get_encoder(self): return self.model.encoder def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def get_bias(self): return {'final_logits_bias': self.bias_layer.bias} def set_bias(self, value): vocab_size = value['final_logits_bias'].shape[-1] self.bias_layer = BiasLayer(name='final_logits_bias', shape=[1, vocab_size], initializer='zeros', trainable=False) self.bias_layer.bias.assign(value['final_logits_bias']) @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BART_GENERATION_EXAMPLE) @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]: if labels is not None: labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels) use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqLMOutput(logits=output.logits, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns) def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs): if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] if decoder_attention_mask is not None: decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:] elif past_key_values is not None: decoder_position_ids = past_key_values[0][0].shape[2] else: decoder_position_ids = tf.range(decoder_input_ids.shape[1]) return {'input_ids': None, 'encoder_outputs': encoder_outputs, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'decoder_position_ids': decoder_position_ids, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'model', None) is not None: with tf.name_scope(self.model.name): self.model.build(None) if getattr(self, 'bias_layer', None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) @add_start_docstrings('\n Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. 
for GLUE\n tasks.\n ', BART_START_DOCSTRING) class TFBartForSequenceClassification(TFBartPretrainedModel, TFSequenceClassificationLoss): def __init__(self, config: BartConfig, load_weight_prefix=None, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name='model') self.classification_head = TFBartClassificationHead(config.d_model, config.num_labels, config.classifier_dropout, name='classification_head') @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, decoder_position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, decoder_head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSeq2SeqSequenceClassifierOutput, Tuple[tf.Tensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError(f'Passing input embeddings is currently not supported for {self.__class__.__name__}') outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) last_hidden_state = outputs[0] eos_mask = tf.equal(input_ids, self.config.eos_token_id) self_masked = tf.reshape(tf.boolean_mask(eos_mask, eos_mask), (tf.shape(input_ids)[0], -1)) tf.Assert(tf.reduce_all(self_masked[:, -1]), ['All examples must have the same number of tokens.']) masked = tf.reshape(tf.boolean_mask(last_hidden_state, eos_mask), (tf.shape(input_ids)[0], tf.shape(self_masked)[1], tf.shape(last_hidden_state)[-1])) sentence_representation = masked[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TFSeq2SeqSequenceClassifierOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, 
encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def serving_output(self, output): logits = tf.convert_to_tensor(output.logits) pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqSequenceClassifierOutput(logits=logits, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'model', None) is not None: with tf.name_scope(self.model.name): self.model.build(None) if getattr(self, 'classification_head', None) is not None: with tf.name_scope(self.classification_head.name): self.classification_head.build(None) # File: transformers-main/src/transformers/models/bart/tokenization_bart.py import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} @lru_cache() def bytes_to_unicode(): bs = list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1)) cs = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class BartTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for (k, v) in self.encoder.items()} self.errors = errors self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for 
(k, v) in self.byte_encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: bpe_merges = merges_handle.read().split('\n')[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+") super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def _tokenize(self, text): bpe_tokens = [] for token in re.findall(self.pat, text): token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8'))) bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' '))) return bpe_tokens def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index) def convert_tokens_to_string(self, tokens): text = ''.join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. 
Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())): text = ' ' + text return (text, kwargs) # File: transformers-main/src/transformers/models/bart/tokenization_bart_fast.py import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} class BartTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = BartTokenizer def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs): mask_token = AddedToken(mask_token, lstrip=True, normalized=True, special=True) if isinstance(mask_token, str) else mask_token super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs) pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space: pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type')) pre_tok_state['add_prefix_space'] = add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) self.add_prefix_space = add_prefix_space tokenizer_component = 'post_processor' tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None) if tokenizer_component_instance: state = json.loads(tokenizer_component_instance.__getstate__()) if 'sep' in state: 
state['sep'] = tuple(state['sep']) if 'cls' in state: state['cls'] = tuple(state['cls']) changes_to_apply = False if state.get('add_prefix_space', add_prefix_space) != add_prefix_space: state['add_prefix_space'] = add_prefix_space changes_to_apply = True if state.get('trim_offsets', trim_offsets) != trim_offsets: state['trim_offsets'] = trim_offsets changes_to_apply = True if changes_to_apply: component_class = getattr(processors, state.pop('type')) new_value = component_class(**state) setattr(self.backend_tokenizer, tokenizer_component, new_value) @property def mask_token(self) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def mask_token(self, value): value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value self._mask_token = value def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) if is_split_into_words and (not self.add_prefix_space): raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.') return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) if is_split_into_words and (not self.add_prefix_space): raise ValueError(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.') return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] # File: transformers-main/src/transformers/models/barthez/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available _import_structure = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_barthez'] = ['BarthezTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_barthez_fast'] = ['BarthezTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_barthez import BarthezTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_barthez_fast import BarthezTokenizerFast else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: 
transformers-main/src/transformers/models/barthez/tokenization_barthez.py """""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'} SPIECE_UNDERLINE = '▁' class BarthezTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, bos_token='', eos_token='', sep_token='', cls_token='', unk_token='', pad_token='', mask_token='', sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None: mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.sp_model) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): return self.sp_model.PieceToId(token) def _convert_id_to_token(self, index): return self.sp_model.IdToPiece(index) def convert_tokens_to_string(self, tokens): current_sub_tokens = [] out_string = '' prev_is_special = False for token in tokens: if token in self.all_special_tokens: if not prev_is_special: out_string += ' ' out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None return state def __setstate__(self, d): self.__dict__ = d if not hasattr(self, 'sp_model_kwargs'): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) 
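# Why sp_model is rebuilt here: SentencePieceProcessor wraps a C++ object that cannot be
# pickled, so __getstate__ drops it from the state dict and __setstate__ re-creates it and
# then re-loads the model file from self.vocab_file in the next statement. A minimal
# round-trip sketch (the hub checkpoint name is illustrative, not taken from this code):
#
#     import pickle
#     from transformers import BarthezTokenizer
#
#     tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     restored = pickle.loads(pickle.dumps(tok))  # exercises __getstate__/__setstate__
#     assert restored.tokenize("Bonjour le monde") == tok.tokenize("Bonjour le monde")
#
# BartphoTokenizer, further down, avoids touching the filesystem on unpickling by storing
# sp_model.serialized_model_proto() in __getstate__ and calling LoadFromSerializedProto().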
self.sp_model.Load(self.vocab_file) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) # File: transformers-main/src/transformers/models/barthez/tokenization_barthez_fast.py """""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: BarthezTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} SPIECE_UNDERLINE = '▁' class BarthezTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = BarthezTokenizer def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='', eos_token='', sep_token='', cls_token='', unk_token='', pad_token='', mask_token='', **kwargs): mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs) self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.') if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) # File: transformers-main/src/transformers/models/bartpho/__init__.py from typing import 
TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _import_structure = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_bartpho'] = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/bartpho/tokenization_bartpho.py """""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = '▁' VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} class BartphoTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, monolingual_vocab_file, bos_token='', eos_token='', sep_token='', cls_token='', unk_token='', pad_token='', mask_token='', sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None: mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self.monolingual_vocab_file = monolingual_vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.fairseq_tokens_to_ids = {} cnt = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(token) not in self.fairseq_tokens_to_ids: self.fairseq_tokens_to_ids[str(token)] = cnt cnt += 1 with open(monolingual_vocab_file, 'r', encoding='utf-8') as f: for line in f.readlines(): token = line.strip().split()[0] self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids) if str(mask_token) not in self.fairseq_tokens_to_ids: self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids) self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()} super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs) def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None state['sp_model_proto'] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d if not hasattr(self, 'sp_model_kwargs'): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if 
already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.fairseq_ids_to_tokens) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _convert_id_to_token(self, index): return self.fairseq_ids_to_tokens[index] def convert_tokens_to_string(self, tokens): out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) out_monolingual_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file): copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file) elif not os.path.isfile(self.monolingual_vocab_file): with open(out_monolingual_vocab_file, 'w', encoding='utf-8') as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'{str(token)} \n') return (out_vocab_file, out_monolingual_vocab_file) # File: transformers-main/src/transformers/models/beit/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available _import_structure = {'configuration_beit': ['BeitConfig', 'BeitOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor'] _import_structure['image_processing_beit'] = ['BeitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_beit'] = ['BeitForImageClassification', 'BeitForMaskedImageModeling', 'BeitForSemanticSegmentation', 'BeitModel', 'BeitPreTrainedModel', 'BeitBackbone'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_beit'] = ['FlaxBeitForImageClassification', 'FlaxBeitForMaskedImageModeling', 'FlaxBeitModel', 'FlaxBeitPreTrainedModel'] if TYPE_CHECKING: from .configuration_beit import BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import BeitBackbone, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/beit/configuration_beit.py """""" import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices class BeitConfig(BackboneConfigMixin, PretrainedConfig): model_type = 'beit' def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, out_features=None, out_indices=None, add_fpn=False, reshape_hidden_states=True, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.use_mask_token = use_mask_token self.use_absolute_position_embeddings = use_absolute_position_embeddings self.use_relative_position_bias = use_relative_position_bias self.use_shared_relative_position_bias = use_shared_relative_position_bias self.layer_scale_init_value = layer_scale_init_value self.drop_path_rate = drop_path_rate self.use_mean_pooling = use_mean_pooling self.pool_scales = pool_scales self.use_auxiliary_head = use_auxiliary_head self.auxiliary_loss_weight = auxiliary_loss_weight self.auxiliary_channels = auxiliary_channels self.auxiliary_num_convs = auxiliary_num_convs 
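# The auxiliary_* attributes stored just above configure the optional FCN auxiliary head
# that BeitForSemanticSegmentation uses alongside its main decode head; when labels are
# provided, its loss contribution is scaled by auxiliary_loss_weight, and
# use_auxiliary_head=False should omit the extra head entirely. A hedged construction
# sketch (values are illustrative, not read from any checkpoint):
#
#     from transformers import BeitConfig, BeitForSemanticSegmentation
#
#     config = BeitConfig(num_labels=150, use_auxiliary_head=True, auxiliary_loss_weight=0.4)
#     model = BeitForSemanticSegmentation(config)  # randomly initialised, for illustration only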
self.auxiliary_concat_input = auxiliary_concat_input self.semantic_loss_ignore_index = semantic_loss_ignore_index if 'segmentation_indices' in kwargs: warnings.warn('The `segmentation_indices` argument is deprecated and will be removed in a future version, use `out_indices` instead.', FutureWarning) out_indices = kwargs.pop('segmentation_indices') self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, self.num_hidden_layers + 1)] (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names) self.add_fpn = add_fpn self.reshape_hidden_states = reshape_hidden_states class BeitOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse('1.11') @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})]) @property def atol_for_validation(self) -> float: return 0.0001 # File: transformers-main/src/transformers/models/beit/convert_beit_unilm_to_pytorch.py """""" import argparse import json from pathlib import Path import requests import torch from datasets import load_dataset from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def create_rename_keys(config, has_lm_head=False, is_semantic=False): prefix = 'backbone.' if is_semantic else '' rename_keys = [] for i in range(config.num_hidden_layers): rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight')) rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias')) rename_keys.append((f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight')) rename_keys.append((f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias')) rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight')) rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias')) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight')) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias')) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight')) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias')) rename_keys.extend([(f'{prefix}cls_token', 'beit.embeddings.cls_token'), (f'{prefix}patch_embed.proj.weight', 'beit.embeddings.patch_embeddings.projection.weight'), (f'{prefix}patch_embed.proj.bias', 'beit.embeddings.patch_embeddings.projection.bias')]) if has_lm_head: rename_keys.extend([('mask_token', 'beit.embeddings.mask_token'), ('rel_pos_bias.relative_position_bias_table', 'beit.encoder.relative_position_bias.relative_position_bias_table'), ('rel_pos_bias.relative_position_index', 'beit.encoder.relative_position_bias.relative_position_index'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias')]) elif is_semantic: rename_keys.extend([('decode_head.conv_seg.weight', 
'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias')]) else: rename_keys.extend([('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias')]) return rename_keys def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False): for i in range(config.num_hidden_layers): prefix = 'backbone.' if is_semantic else '' in_proj_weight = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight') q_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias') v_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias') state_dict[f'beit.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[:config.hidden_size, :] state_dict[f'beit.encoder.layer.{i}.attention.attention.query.bias'] = q_bias state_dict[f'beit.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size:config.hidden_size * 2, :] state_dict[f'beit.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size:, :] state_dict[f'beit.encoder.layer.{i}.attention.attention.value.bias'] = v_bias gamma_1 = state_dict.pop(f'{prefix}blocks.{i}.gamma_1') gamma_2 = state_dict.pop(f'{prefix}blocks.{i}.gamma_2') state_dict[f'beit.encoder.layer.{i}.lambda_1'] = gamma_1 state_dict[f'beit.encoder.layer.{i}.lambda_2'] = gamma_2 if not has_lm_head: table = state_dict.pop(f'{prefix}blocks.{i}.attn.relative_position_bias_table') index = state_dict.pop(f'{prefix}blocks.{i}.attn.relative_position_index') state_dict[f'beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table'] = table state_dict[f'beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index'] = index def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path): config = BeitConfig() has_lm_head = False is_semantic = False repo_id = 'huggingface/label-files' if checkpoint_url[-9:-4] == 'pt22k': config.use_shared_relative_position_bias = True config.use_mask_token = True has_lm_head = True elif checkpoint_url[-9:-4] == 'ft22k': config.use_relative_position_bias = True config.num_labels = 21841 filename = 'imagenet-22k-id2label.json' id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} del id2label[9205] del id2label[15027] config.id2label = id2label config.label2id = {v: k for (k, v) in id2label.items()} elif checkpoint_url[-8:-4] == 'to1k': config.use_relative_position_bias = True config.num_labels = 1000 filename = 'imagenet-1k-id2label.json' id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} config.id2label = id2label config.label2id = {v: k for (k, v) in id2label.items()} if '384' in checkpoint_url: config.image_size = 384 if '512' in checkpoint_url: config.image_size = 512 elif 'ade20k' in checkpoint_url: config.use_relative_position_bias = True config.num_labels = 150 filename = 'ade20k-id2label.json' id2label = json.load(open(hf_hub_download(repo_id, 
filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} config.id2label = id2label config.label2id = {v: k for (k, v) in id2label.items()} config.image_size = 640 is_semantic = True else: raise ValueError("Checkpoint not supported, URL should either end with 'pt22k', 'ft22k', 'to1k' or 'ade20k'") if 'base' in checkpoint_url: pass elif 'large' in checkpoint_url: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 if 'ade20k' in checkpoint_url: config.image_size = 640 config.out_indices = [7, 11, 15, 23] else: raise ValueError("Should either find 'base' or 'large' in checkpoint URL") state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu', check_hash=True) state_dict = state_dict['model'] if 'ade20k' not in checkpoint_url else state_dict['state_dict'] rename_keys = create_rename_keys(config, has_lm_head=has_lm_head, is_semantic=is_semantic) for (src, dest) in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head, is_semantic=is_semantic) if is_semantic: for (key, val) in state_dict.copy().items(): val = state_dict.pop(key) if key.startswith('backbone.fpn'): key = key.replace('backbone.fpn', 'fpn') state_dict[key] = val if checkpoint_url[-9:-4] == 'pt22k': model = BeitForMaskedImageModeling(config) elif 'ade20k' in checkpoint_url: model = BeitForSemanticSegmentation(config) else: model = BeitForImageClassification(config) model.eval() model.load_state_dict(state_dict) if is_semantic: image_processor = BeitImageProcessor(size=config.image_size, do_center_crop=False) ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test', trust_remote_code=True) image = Image.open(ds[0]['file']) else: image_processor = BeitImageProcessor(size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False) image = prepare_img() encoding = image_processor(images=image, return_tensors='pt') pixel_values = encoding['pixel_values'] outputs = model(pixel_values) logits = outputs.logits expected_shape = torch.Size([1, 1000]) if checkpoint_url[:-4].endswith('beit_base_patch16_224_pt22k'): expected_shape = torch.Size([1, 196, 8192]) elif checkpoint_url[:-4].endswith('beit_large_patch16_224_pt22k'): expected_shape = torch.Size([1, 196, 8192]) elif checkpoint_url[:-4].endswith('beit_base_patch16_224_pt22k_ft22k'): expected_shape = torch.Size([1, 21841]) expected_logits = torch.tensor([2.2288, 2.4671, 0.7395]) expected_class_idx = 2397 elif checkpoint_url[:-4].endswith('beit_large_patch16_224_pt22k_ft22k'): expected_shape = torch.Size([1, 21841]) expected_logits = torch.tensor([1.6881, -0.2787, 0.5901]) expected_class_idx = 2396 elif checkpoint_url[:-4].endswith('beit_base_patch16_224_pt22k_ft1k'): expected_logits = torch.tensor([0.1241, 0.0798, -0.6569]) expected_class_idx = 285 elif checkpoint_url[:-4].endswith('beit_base_patch16_224_pt22k_ft22kto1k'): expected_logits = torch.tensor([-1.2385, -1.0987, -1.0108]) expected_class_idx = 281 elif checkpoint_url[:-4].endswith('beit_base_patch16_384_pt22k_ft22kto1k'): expected_logits = torch.tensor([-1.5303, -0.9484, -0.3147]) expected_class_idx = 761 elif checkpoint_url[:-4].endswith('beit_large_patch16_224_pt22k_ft1k'): expected_logits = torch.tensor([0.461, -0.0928, 0.2086]) expected_class_idx = 761 elif checkpoint_url[:-4].endswith('beit_large_patch16_224_pt22k_ft22kto1k'): expected_logits = torch.tensor([-0.4804, 0.6257, -0.1837]) expected_class_idx = 761 elif 
checkpoint_url[:-4].endswith('beit_large_patch16_384_pt22k_ft22kto1k'): expected_logits = torch.tensor([[-0.5122, 0.5117, -0.2113]]) expected_class_idx = 761 elif checkpoint_url[:-4].endswith('beit_large_patch16_512_pt22k_ft22kto1k'): expected_logits = torch.tensor([-0.3062, 0.7261, 0.4852]) expected_class_idx = 761 elif checkpoint_url[:-4].endswith('beit_base_patch16_640_pt22k_ft22ktoade20k'): expected_shape = (1, 150, 160, 160) expected_logits = torch.tensor([[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]]]) elif checkpoint_url[:-4].endswith('beit_large_patch16_640_pt22k_ft22ktoade20k'): expected_shape = (1, 150, 160, 160) expected_logits = torch.tensor([[[-4.3305, -2.3049, -3.0161], [-2.9591, -1.5305, -2.2251], [-3.4198, -1.8004, -2.9062]], [[-5.8922, -3.7435, -4.3978], [-4.2063, -2.7872, -3.4755], [-4.2791, -3.1874, -4.1681]], [[0.9895, 4.3467, 4.7663], [4.2476, 5.683, 6.1518], [4.555, 6.2495, 6.5154]]]) else: raise ValueError("Can't verify logits as model is not supported") if logits.shape != expected_shape: raise ValueError(f'Shape of logits not as expected. logits.shape={logits.shape!r}, expected_shape={expected_shape!r}') if not has_lm_head: if is_semantic: if not torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=0.001): raise ValueError('First elements of logits not as expected') else: print('Predicted class idx:', logits.argmax(-1).item()) if not torch.allclose(logits[0, :3], expected_logits, atol=0.001): raise ValueError('First elements of logits not as expected') if logits.argmax(-1).item() != expected_class_idx: raise ValueError('Predicted class index not as expected') Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) print(f'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint_url', default='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth', type=str, help='URL to the original PyTorch checkpoint (.pth file).') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.') args = parser.parse_args() convert_beit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) # File: transformers-main/src/transformers/models/beit/feature_extraction_beit.py """""" import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor logger = logging.get_logger(__name__) class BeitFeatureExtractor(BeitImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. 
Please use BeitImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs) # File: transformers-main/src/transformers/models/beit/image_processing_beit.py """""" from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import INIT_SERVICE_KWARGS, BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_tensor, is_vision_available, logging from ...utils.deprecation import deprecate_kwarg if is_vision_available(): import PIL if is_torch_available(): import torch logger = logging.get_logger(__name__) class BeitImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] @deprecate_kwarg('reduce_labels', new_name='do_reduce_labels', version='4.41.0') @filter_out_non_signature_kwargs(extra=INIT_SERVICE_KWARGS) def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Dict[str, int]=None, rescale_factor: Union[int, float]=1 / 255, do_rescale: bool=True, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_reduce_labels: bool=False, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 256, 'width': 256} size = get_size_dict(size) crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224} crop_size = get_size_dict(crop_size, param_name='crop_size') self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.do_reduce_labels = do_reduce_labels @classmethod def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): image_processor_dict = image_processor_dict.copy() if 'reduce_labels' in image_processor_dict: image_processor_dict['do_reduce_labels'] = image_processor_dict.pop('reduce_labels') return super().from_dict(image_processor_dict, **kwargs) def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size, default_to_square=True, param_name='size') if 'height' not in size or 'width' not in size: raise ValueError(f'The `size` argument must contain `height` and `width` keys. 
Got {size.keys()}') return resize(image, size=(size['height'], size['width']), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def reduce_label(self, label: ImageInput) -> np.ndarray: label = to_numpy_array(label) label[label == 0] = 255 label = label - 1 label[label == 254] = 255 return label def _preprocess(self, image: ImageInput, do_reduce_labels: bool=None, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[str, int]=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None): if do_reduce_labels: image = self.reduce_label(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) return image def _preprocess_image(self, image: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[str, int]=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: image = to_numpy_array(image) if is_scaled_image(image) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(image) image = self._preprocess(image, do_reduce_labels=False, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def _preprocess_segmentation_map(self, segmentation_map: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[str, int]=None, do_reduce_labels: bool=None, input_data_format: Optional[Union[str, ChannelDimension]]=None): segmentation_map = to_numpy_array(segmentation_map) if segmentation_map.ndim == 2: segmentation_map = segmentation_map[None, ...] 
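# Note (illustrative, describing the surrounding code): 2D segmentation maps get a fake
# channel dimension here so they can flow through the same `_preprocess` pipeline as
# images; the dimension is squeezed out again further down. When `do_reduce_labels` is
# set, `reduce_label` above remaps the background class to the ignore index: for an
# ADE20k-style map, [0, 1, 2, 255] becomes [255, 0, 1, 255] (0 -> 255, every other class
# id shifted down by one, and 254 folded back to 255 after the shift).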
added_dimension = True input_data_format = ChannelDimension.FIRST else: added_dimension = False if input_data_format is None: input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1) segmentation_map = self._preprocess(image=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, resample=resample, size=size, do_center_crop=do_center_crop, crop_size=crop_size, do_normalize=False, do_rescale=False, input_data_format=ChannelDimension.FIRST) if added_dimension: segmentation_map = np.squeeze(segmentation_map, axis=0) segmentation_map = segmentation_map.astype(np.int64) return segmentation_map def __call__(self, images, segmentation_maps=None, **kwargs): return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs) @deprecate_kwarg('reduce_labels', new_name='do_reduce_labels', version='4.41.0') @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[str, int]=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=True, param_name='size') resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size') do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels images = make_list_of_images(images) if segmentation_maps is not None: segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) if segmentation_maps is not None and (not valid_images(segmentation_maps)): raise ValueError('Invalid segmentation_maps type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') if not valid_images(images): raise ValueError('Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) images = [self._preprocess_image(image=img, do_resize=do_resize, do_center_crop=do_center_crop, do_rescale=do_rescale, do_normalize=do_normalize, resample=resample, size=size, rescale_factor=rescale_factor, crop_size=crop_size, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for img in images] data = {'pixel_values': images} if segmentation_maps is not None: segmentation_maps = [self._preprocess_segmentation_map(segmentation_map=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, resample=resample, size=size, do_center_crop=do_center_crop, crop_size=crop_size) for segmentation_map in segmentation_maps] data['labels'] = segmentation_maps return BatchFeature(data=data, tensor_type=return_tensors) def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple]=None): logits = outputs.logits if target_sizes is not None: if len(logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') if is_torch_tensor(target_sizes): target_sizes = target_sizes.numpy() semantic_segmentation = [] for idx in range(len(logits)): resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = logits.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation # File: transformers-main/src/transformers/models/beit/modeling_beit.py """""" import collections.abc import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, MaskedLMOutput, SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int from ...utils.backbone_utils import BackboneMixin from .configuration_beit import BeitConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'BeitConfig' _CHECKPOINT_FOR_DOC = 'microsoft/beit-base-patch16-224-pt22k' _EXPECTED_OUTPUT_SHAPE = [1, 197, 768] _IMAGE_CLASS_CHECKPOINT = 'microsoft/beit-base-patch16-224' _IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat' @dataclass class BeitModelOutputWithPooling(BaseModelOutputWithPooling): def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor: if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() output = 
input.div(keep_prob) * random_tensor return output class BeitDropPath(nn.Module): def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class BeitEmbeddings(nn.Module): def __init__(self, config: BeitConfig) -> None: super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if config.use_mask_token: self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) else: self.mask_token = None self.patch_embeddings = BeitPatchEmbeddings(config) self.patch_size = config.patch_size self.image_size = config.image_size if isinstance(config.image_size, collections.abc.Iterable) else (config.image_size, config.image_size) num_patches = self.patch_embeddings.num_patches if config.use_absolute_position_embeddings: self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) else: self.position_embeddings = None self.dropout = nn.Dropout(config.hidden_dropout_prob) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> torch.Tensor: (_, _, height, width) = pixel_values.shape (embeddings, (patch_height, patch_width)) = self.patch_embeddings(pixel_values, self.position_embeddings[:, 1:, :] if self.position_embeddings is not None else None) (batch_size, seq_len, _) = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1 - w) + mask_tokens * w cls_tokens = self.cls_token.expand(batch_size, -1, -1) if self.position_embeddings is not None: if interpolate_pos_encoding: cls_tokens = cls_tokens + self.interpolate_pos_encoding(embeddings, height, width) else: cls_tokens = cls_tokens + self.position_embeddings[:, :1, :] embeddings = torch.cat((cls_tokens, embeddings), dim=1) embeddings = self.dropout(embeddings) return (embeddings, (patch_height, patch_width)) class BeitPatchEmbeddings(nn.Module): def __init__(self, config): super().__init__() (image_size, patch_size) = (config.image_size, config.patch_size) (num_channels, hidden_size) = (config.num_channels, config.hidden_size) image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, 
collections.abc.Iterable) else (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.patch_shape = patch_shape self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, position_embedding: Optional[torch.Tensor]=None) -> torch.Tensor: (batch_size, num_channels, height, width) = pixel_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') embeddings = self.projection(pixel_values) (patch_height, patch_width) = (embeddings.shape[2], embeddings.shape[3]) if position_embedding is not None: position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(0, 3, 1, 2) position_embedding = nn.functional.interpolate(position_embedding, size=(patch_height, patch_width), mode='bicubic') embeddings = embeddings + position_embedding embeddings = embeddings.flatten(2).transpose(1, 2) return (embeddings, (patch_height, patch_width)) class BeitSelfAttention(nn.Module): def __init__(self, config: BeitConfig, window_size: Optional[tuple]=None) -> None: super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size {(config.hidden_size,)} is not a multiple of the number of attention heads {config.num_attention_heads}.') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) if window_size: self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size) else: self.relative_position_bias = None def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional['BeitRelativePositionBias']=None, interpolate_pos_encoding: bool=False, resolution: Optional[Tuple[int]]=None) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if self.relative_position_bias is not None: (height, width) = resolution window_size = (height // self.config.patch_size, width // self.config.patch_size) attention_scores = attention_scores + self.relative_position_bias(window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1]) if relative_position_bias is not 
None: attention_scores = attention_scores + relative_position_bias attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class BeitSelfOutput(nn.Module): def __init__(self, config: BeitConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class BeitAttention(nn.Module): def __init__(self, config: BeitConfig, window_size: Optional[tuple]=None) -> None: super().__init__() self.attention = BeitSelfAttention(config, window_size=window_size) self.output = BeitSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional['BeitRelativePositionBias']=None, interpolate_pos_encoding: bool=False, resolution: Optional[Tuple[int]]=None) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions, relative_position_bias, interpolate_pos_encoding, resolution) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class BeitIntermediate(nn.Module): def __init__(self, config: BeitConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BeitOutput(nn.Module): def __init__(self, config: BeitConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class BeitLayer(nn.Module): def 
__init__(self, config: BeitConfig, window_size: Optional[tuple]=None, drop_path_rate: float=0.0) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BeitAttention(config, window_size=window_size) self.intermediate = BeitIntermediate(config) self.output = BeitOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.drop_path = BeitDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) init_values = config.layer_scale_init_value if init_values > 0: self.lambda_1 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True) self.lambda_2 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True) else: (self.lambda_1, self.lambda_2) = (None, None) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional['BeitRelativePositionBias']=None, interpolate_pos_encoding: bool=False, resolution: Optional[Tuple[int]]=None) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]: self_attention_outputs = self.attention(self.layernorm_before(hidden_states), head_mask, output_attentions=output_attentions, relative_position_bias=relative_position_bias, interpolate_pos_encoding=interpolate_pos_encoding, resolution=resolution) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] if self.lambda_1 is not None: attention_output = self.lambda_1 * attention_output hidden_states = self.drop_path(attention_output) + hidden_states layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.output(layer_output) if self.lambda_2 is not None: layer_output = self.lambda_2 * layer_output layer_output = self.drop_path(layer_output) + hidden_states outputs = (layer_output,) + outputs return outputs class BeitRelativePositionBias(nn.Module): def __init__(self, config: BeitConfig, window_size: tuple) -> None: super().__init__() self.window_size = window_size self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter(torch.zeros(self.num_relative_distance, config.num_attention_heads)) self.relative_position_indices = {} def generate_relative_position_index(self, window_size: Tuple[int, int]) -> torch.Tensor: num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 window_area = window_size[0] * window_size[1] grid = torch.meshgrid(torch.arange(window_size[0]), torch.arange(window_size[1]), indexing='ij') coords = torch.stack(grid) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += window_size[0] - 1 relative_coords[:, :, 1] += window_size[1] - 1 relative_coords[:, :, 0] *= 2 * window_size[1] - 1 relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype) relative_position_index[1:, 1:] = relative_coords.sum(-1) relative_position_index[0, 0:] = num_relative_distance - 3 relative_position_index[0:, 0] = num_relative_distance - 2 relative_position_index[0, 0] = num_relative_distance - 1 return relative_position_index def forward(self, window_size, interpolate_pos_encoding: bool=False, dim_size=None) -> 
torch.Tensor: old_height = 2 * self.window_size[0] - 1 old_width = 2 * self.window_size[1] - 1 new_height = 2 * window_size[0] - 1 new_width = 2 * window_size[1] - 1 old_relative_position_bias_table = self.relative_position_bias_table old_num_relative_distance = self.num_relative_distance new_num_relative_distance = new_height * new_width + 3 old_sub_table = old_relative_position_bias_table[:old_num_relative_distance - 3] old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2) new_sub_table = nn.functional.interpolate(old_sub_table, size=(torch_int(new_height), torch_int(new_width)), mode='bilinear') new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1) new_relative_position_bias_table = torch.cat([new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3:]]) key = window_size if key not in self.relative_position_indices.keys(): self.relative_position_indices[key] = self.generate_relative_position_index(window_size) relative_position_bias = new_relative_position_bias_table[self.relative_position_indices[key].view(-1)] relative_position_bias = relative_position_bias.view(window_size[0] * window_size[1] + 1, window_size[0] * window_size[1] + 1, -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() if interpolate_pos_encoding: relative_position_bias = nn.functional.interpolate(relative_position_bias.unsqueeze(1), size=(dim_size, dim_size), mode='bilinear', align_corners=False).squeeze(1) return relative_position_bias.unsqueeze(0) class BeitEncoder(nn.Module): def __init__(self, config: BeitConfig, window_size: Optional[tuple]=None) -> None: super().__init__() self.config = config if config.use_shared_relative_position_bias: self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size) else: self.relative_position_bias = None dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)] self.layer = nn.ModuleList([BeitLayer(config, window_size=window_size if config.use_relative_position_bias else None, drop_path_rate=dpr[i]) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, interpolate_pos_encoding: bool=False, resolution: Optional[Tuple[int]]=None, return_dict: bool=True) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, layer_head_mask, output_attentions) else: (height, width) = resolution window_size = (height // self.config.patch_size, width // self.config.patch_size) relative_position_bias = self.relative_position_bias(window_size, interpolate_pos_encoding=interpolate_pos_encoding, dim_size=hidden_states.shape[1]) if self.relative_position_bias is not None else None layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias, interpolate_pos_encoding, resolution) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + 
(layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class BeitPreTrainedModel(PreTrainedModel): config_class = BeitConfig base_model_prefix = 'beit' main_input_name = 'pixel_values' supports_gradient_checkpointing = True _no_split_modules = ['BeitLayer'] _keys_to_ignore_on_load_unexpected = ['.*relative_position_index.*'] def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) BEIT_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`BeitConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BEIT_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`BeitImageProcessor.__call__`] for details.\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n Whether to interpolate the pre-trained position encodings.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare Beit Model transformer outputting raw hidden-states without any specific head on top.', BEIT_START_DOCSTRING) class BeitModel(BeitPreTrainedModel): def __init__(self, config: BeitConfig, add_pooling_layer: bool=True) -> None: super().__init__(config) self.config = config self.embeddings = BeitEmbeddings(config) self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape) self.layernorm = nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = BeitPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BeitModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BeitModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) (embedding_output, _) = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding) resolution = pixel_values.shape[2:] encoder_outputs = self.encoder(embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, resolution=resolution, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) return head_outputs + encoder_outputs[1:] return BeitModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class BeitPooler(nn.Module): def __init__(self, config: BeitConfig) -> None: super().__init__() self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if self.layernorm is not None: patch_tokens = hidden_states[:, 1:, :] pooled_output = self.layernorm(patch_tokens.mean(1)) else: pooled_output = hidden_states[:, 0] return pooled_output 
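# Illustrative usage sketch (the helper name `_example_beit_model_pooling` is made up for
# this sketch, it is not part of the library API). It shows what the pooler above
# produces: with `use_mean_pooling=True` (the BEiT default) the pooled output is the
# layer-normed mean of the patch tokens, otherwise it is the final [CLS] token. The
# checkpoint matches `_CHECKPOINT_FOR_DOC` defined earlier in this file.
def _example_beit_model_pooling():
    import torch

    from transformers import BeitModel

    model = BeitModel.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
    # Dummy batch for shape inspection; a real pipeline would use BeitImageProcessor.
    pixel_values = torch.rand(1, 3, 224, 224)
    with torch.no_grad():
        outputs = model(pixel_values)
    # 1 [CLS] token + (224 // 16) ** 2 = 196 patch tokens -> (1, 197, 768) for the base model.
    print(outputs.last_hidden_state.shape)
    # Layer-normed mean over the 196 patch tokens -> (1, 768).
    print(outputs.pooler_output.shape)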
@add_start_docstrings("Beit Model transformer with a 'language' modeling head on top. BEiT does masked image modeling by predicting\n visual tokens of a Vector-Quantize Variational Autoencoder (VQ-VAE), whereas other vision models like ViT and DeiT\n predict RGB pixel values. As a result, this class is incompatible with [`AutoModelForMaskedImageModeling`], so you\n will need to use [`BeitForMaskedImageModeling`] directly if you wish to do masked image modeling with BEiT.", BEIT_START_DOCSTRING) class BeitForMaskedImageModeling(BeitPreTrainedModel): def __init__(self, config: BeitConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.beit = BeitModel(config, add_pooling_layer=False) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) self.post_init() @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.beit(pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.layernorm(sequence_output) prediction_scores = self.lm_head(sequence_output[:, 1:]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores[bool_masked_pos], labels) if not return_dict: output = (prediction_scores,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final\n hidden states of the patch tokens) e.g. 
for ImageNet.\n ', BEIT_START_DOCSTRING) class BeitForImageClassification(BeitPreTrainedModel): def __init__(self, config: BeitConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.beit = BeitModel(config, add_pooling_layer=True) self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.beit(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class BeitConvModule(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], padding: Union[int, Tuple[int, int], str]=0, bias: bool=False, dilation: Union[int, Tuple[int, int]]=1) -> None: super().__init__() self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation) self.bn = nn.BatchNorm2d(out_channels) self.activation = nn.ReLU() def forward(self, input: torch.Tensor) -> torch.Tensor: output = self.conv(input) output = self.bn(output) output = self.activation(output) return output class BeitPyramidPoolingBlock(nn.Module): def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None: super().__init__() self.layers = [nn.AdaptiveAvgPool2d(pool_scale), BeitConvModule(in_channels, channels, kernel_size=1)] for (i, layer) in enumerate(self.layers): self.add_module(str(i), layer) def forward(self, input: torch.Tensor) -> torch.Tensor: hidden_state = input for layer in self.layers: hidden_state = layer(hidden_state) return hidden_state class BeitPyramidPoolingModule(nn.Module): 
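    # Descriptive note (illustrative): this is the PSPNet-style pyramid pooling module used
    # by the UPerNet decode head (`BeitUperHead`, defined next). Each block adaptive-average-
    # pools the last feature map down to `pool_scale` x `pool_scale` (`BeitConfig.pool_scales`,
    # e.g. (1, 2, 3, 6)), projects it with a 1x1 `BeitConvModule`, and the module bilinearly
    # upsamples each pooled map back to the input resolution; `BeitUperHead.psp_forward` then
    # concatenates these with the input and fuses them through its bottleneck convolution.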
def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None: super().__init__() self.pool_scales = pool_scales self.align_corners = align_corners self.in_channels = in_channels self.channels = channels self.blocks = [] for (i, pool_scale) in enumerate(pool_scales): block = BeitPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels) self.blocks.append(block) self.add_module(str(i), block) def forward(self, x: torch.Tensor) -> List[torch.Tensor]: ppm_outs = [] for ppm in self.blocks: ppm_out = ppm(x) upsampled_ppm_out = nn.functional.interpolate(ppm_out, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners) ppm_outs.append(upsampled_ppm_out) return ppm_outs class BeitUperHead(nn.Module): def __init__(self, config: BeitConfig) -> None: super().__init__() self.pool_scales = config.pool_scales self.in_channels = [config.hidden_size] * 4 self.channels = config.hidden_size self.align_corners = False self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1) self.psp_modules = BeitPyramidPoolingModule(self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners) self.bottleneck = BeitConvModule(self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1) self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() for in_channels in self.in_channels[:-1]: l_conv = BeitConvModule(in_channels, self.channels, kernel_size=1) fpn_conv = BeitConvModule(self.channels, self.channels, kernel_size=3, padding=1) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) self.fpn_bottleneck = BeitConvModule(len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1) def psp_forward(self, inputs): x = inputs[-1] psp_outs = [x] psp_outs.extend(self.psp_modules(x)) psp_outs = torch.cat(psp_outs, dim=1) output = self.bottleneck(psp_outs) return output def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: laterals = [lateral_conv(encoder_hidden_states[i]) for (i, lateral_conv) in enumerate(self.lateral_convs)] laterals.append(self.psp_forward(encoder_hidden_states)) used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): prev_shape = laterals[i - 1].shape[2:] laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(laterals[i], size=prev_shape, mode='bilinear', align_corners=self.align_corners) fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)] fpn_outs.append(laterals[-1]) for i in range(used_backbone_levels - 1, 0, -1): fpn_outs[i] = nn.functional.interpolate(fpn_outs[i], size=fpn_outs[0].shape[2:], mode='bilinear', align_corners=self.align_corners) fpn_outs = torch.cat(fpn_outs, dim=1) output = self.fpn_bottleneck(fpn_outs) output = self.classifier(output) return output class BeitFCNHead(nn.Module): def __init__(self, config: BeitConfig, in_index: int=2, kernel_size: int=3, dilation: Union[int, Tuple[int, int]]=1) -> None: super().__init__() self.in_channels = config.hidden_size self.channels = config.auxiliary_channels self.num_convs = config.auxiliary_num_convs self.concat_input = config.auxiliary_concat_input self.in_index = in_index conv_padding = kernel_size // 2 * dilation convs = [] convs.append(BeitConvModule(self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation)) for i in range(self.num_convs - 1): convs.append(BeitConvModule(self.channels, self.channels, 
kernel_size=kernel_size, padding=conv_padding, dilation=dilation)) if self.num_convs == 0: self.convs = nn.Identity() else: self.convs = nn.Sequential(*convs) if self.concat_input: self.conv_cat = BeitConvModule(self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2) self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1) def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = encoder_hidden_states[self.in_index] output = self.convs(hidden_states) if self.concat_input: output = self.conv_cat(torch.cat([hidden_states, output], dim=1)) output = self.classifier(output) return output @add_start_docstrings('\n Beit Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.\n ', BEIT_START_DOCSTRING) class BeitForSemanticSegmentation(BeitPreTrainedModel): def __init__(self, config: BeitConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.beit = BeitModel(config, add_pooling_layer=False) if len(self.config.out_indices) != 4: raise ValueError('BeitForSemanticSegmentation requires config.out_indices to be a list of 4 integers, specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of a base-sized architecture.') self.fpn1 = nn.Sequential(nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2), nn.BatchNorm2d(config.hidden_size), nn.GELU(), nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2)) self.fpn2 = nn.Sequential(nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2)) self.fpn3 = nn.Identity() self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2) self.decode_head = BeitUperHead(config) self.auxiliary_head = BeitFCNHead(config) if config.use_auxiliary_head else None self.post_init() def compute_loss(self, logits, auxiliary_logits, labels): upsampled_logits = nn.functional.interpolate(logits, size=labels.shape[-2:], mode='bilinear', align_corners=False) if auxiliary_logits is not None: upsampled_auxiliary_logits = nn.functional.interpolate(auxiliary_logits, size=labels.shape[-2:], mode='bilinear', align_corners=False) loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) main_loss = loss_fct(upsampled_logits, labels) loss = main_loss if auxiliary_logits is not None: auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels) loss += self.config.auxiliary_loss_weight * auxiliary_loss return loss @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states if labels is not None and self.config.num_labels == 1: raise ValueError('The number of labels should be greater than one') outputs = self.beit(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict) 
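        # Descriptive note (illustrative): the hidden states selected by `config.out_indices`
        # are turned into a 4-level feature pyramid below. The [CLS] token is dropped, the
        # remaining tokens are reshaped to (batch, hidden, image_size/16, image_size/16), and
        # fpn1..fpn4 rescale them to strides 4, 8, 16 and 32 before the UPerNet head. For the
        # 640x640 ADE20k models (patch size 16, patch_resolution = 40) this yields maps of
        # 160, 80, 40 and 20 pixels per side, consistent with the (1, 150, 160, 160) logits
        # shape checked in the conversion script earlier in this dump.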
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] features = [feature for (idx, feature) in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices] batch_size = pixel_values.shape[0] patch_resolution = self.config.image_size // self.config.patch_size features = [x[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution) for x in features] ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] for i in range(len(features)): features[i] = ops[i](features[i]) logits = self.decode_head(features) auxiliary_logits = None if self.auxiliary_head is not None: auxiliary_logits = self.auxiliary_head(features) loss = None if labels is not None: loss = self.compute_loss(logits, auxiliary_logits, labels) if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions) @add_start_docstrings('\n BEiT backbone, to be used with frameworks like DETR and MaskFormer.\n ', BEIT_START_DOCSTRING) class BeitBackbone(BeitPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] self.embeddings = BeitEmbeddings(config) self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape) if config.add_fpn: if len(self.config.out_indices) != 4: raise ValueError('BeitBackbone requires config.out_indices to be a list of 4 integers, specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of a base-sized architecture.') hidden_size = config.hidden_size self.fpn1 = nn.Sequential(nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2), nn.BatchNorm2d(hidden_size, eps=config.batch_norm_eps), nn.GELU(), nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2)) self.fpn2 = nn.Sequential(nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2)) self.fpn3 = nn.Identity() self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2) self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions batch_size = pixel_values.shape[0] (embedding_output, (patch_height, patch_width)) = self.embeddings(pixel_values) resolution = pixel_values.shape[2:] outputs = self.encoder(embedding_output, output_hidden_states=True, output_attentions=output_attentions, resolution=resolution, return_dict=return_dict) hidden_states = outputs.hidden_states if return_dict else outputs[1] feature_maps = () for (stage, hidden_state) in zip(self.stage_names, hidden_states): if stage in self.out_features: if self.config.reshape_hidden_states: hidden_state = 
hidden_state[:, 1:, :] hidden_state = hidden_state.permute(0, 2, 1) hidden_state = hidden_state.reshape(batch_size, -1, patch_height, patch_width) feature_maps += (hidden_state,) if self.config.add_fpn: feature_maps = [self.fpn1(feature_maps[0]), self.fpn2(feature_maps[1]), self.fpn3(feature_maps[2]), self.fpn4(feature_maps[3])] feature_maps = tuple(feature_maps) if not return_dict: if output_hidden_states: output = (feature_maps,) + outputs[1:] else: output = (feature_maps,) + outputs[2:] return output return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/beit/modeling_flax_beit.py from typing import Callable, List, Optional, Tuple import flax import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPooling, FlaxMaskedLMOutput, FlaxSequenceClassifierOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward from .configuration_beit import BeitConfig @flax.struct.dataclass class FlaxBeitModelOutputWithPooling(FlaxBaseModelOutputWithPooling): BEIT_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading, saving and converting weights from PyTorch models)\n\n This model is also a\n [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as\n a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and\n behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`BeitConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' BEIT_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`AutoImageProcessor.__call__`] for details.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' def relative_position_index_init(window_size: Tuple[int, int]) -> jnp.ndarray: num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 coords_h = np.arange(window_size[0]) coords_w = np.arange(window_size[1]) coords = np.stack(np.meshgrid(coords_h, coords_w, indexing='ij')) coords_flatten = np.reshape(coords, (2, -1)) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = np.transpose(relative_coords, (1, 2, 0)) relative_coords[:, :, 0] += window_size[0] - 1 relative_coords[:, :, 1] += window_size[1] - 1 relative_coords[:, :, 0] *= 2 * window_size[1] - 1 relative_position_index = np.zeros(shape=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) relative_position_index[1:, 1:] = relative_coords.sum(-1) relative_position_index[0, 0:] = num_relative_distance - 3 relative_position_index[0:, 0] = num_relative_distance - 2 relative_position_index[0, 0] = num_relative_distance - 1 return jnp.array(relative_position_index) def ones_with_scale(key, shape, scale, dtype=jnp.float32): return jnp.ones(shape, dtype) * scale class FlaxBeitDropPath(nn.Module): rate: float @nn.module.compact def __call__(self, inputs, deterministic: Optional[bool]=True): if self.rate == 0.0: return inputs keep_prob = 1.0 - self.rate if deterministic: return inputs else: shape = (inputs.shape[0],) + (1,) * (inputs.ndim - 1) rng = self.make_rng('droppath') random_tensor = keep_prob + jax.random.uniform(rng, shape=shape, dtype=inputs.dtype) binary_tensor = jnp.floor(random_tensor) output = inputs / keep_prob * binary_tensor return output class FlaxBeitPatchEmbeddings(nn.Module): config: BeitConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.num_channels = self.config.num_channels image_size = self.config.image_size patch_size = self.config.patch_size num_patches = image_size // patch_size * (image_size // patch_size) patch_shape = (image_size // patch_size, image_size // patch_size) self.num_patches = num_patches self.patch_shape = patch_shape self.projection = nn.Conv(self.config.hidden_size, kernel_size=(patch_size, patch_size), strides=(patch_size, patch_size), padding='VALID', dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) def __call__(self, pixel_values): num_channels = pixel_values.shape[-1] if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') embeddings 
= self.projection(pixel_values) (batch_size, _, _, channels) = embeddings.shape return jnp.reshape(embeddings, (batch_size, -1, channels)) class FlaxBeitEmbeddings(nn.Module): config: BeitConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.cls_token = self.param('cls_token', nn.initializers.zeros, (1, 1, self.config.hidden_size)) if self.config.use_mask_token: self.mask_token = self.param('mask_token', nn.initializers.zeros, (1, 1, self.config.hidden_size)) self.patch_embeddings = FlaxBeitPatchEmbeddings(self.config, dtype=self.dtype) num_patches = self.patch_embeddings.num_patches if self.config.use_absolute_position_embeddings: self.position_embeddings = self.param('position_embeddings', nn.initializers.zeros, (1, num_patches + 1, self.config.hidden_size)) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, pixel_values, bool_masked_pos=None, deterministic=True): embeddings = self.patch_embeddings(pixel_values) (batch_size, seq_len, _) = embeddings.shape cls_tokens = jnp.broadcast_to(self.cls_token, (batch_size, 1, self.config.hidden_size)) cls_tokens = cls_tokens.astype(embeddings.dtype) if bool_masked_pos is not None: mask_tokens = jnp.broadcast_to(self.mask_token, (batch_size, seq_len, self.config.hidden_size)) mask_tokens = mask_tokens.astype(embeddings.dtype) w = jnp.expand_dims(bool_masked_pos, axis=-1) embeddings = embeddings * (1 - w) + mask_tokens * w embeddings = jnp.concatenate((cls_tokens, embeddings), axis=1) if self.config.use_absolute_position_embeddings: embeddings = embeddings + self.position_embeddings.astype(embeddings.dtype) embeddings = self.dropout(embeddings, deterministic=deterministic) return embeddings class FlaxBeitRelativePositionBias(nn.Module): config: BeitConfig window_size: Tuple[int, int] dtype: jnp.dtype = jnp.float32 def setup(self): num_relative_distance = (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) + 3 self.relative_position_bias_table = self.param('relative_position_bias_table', nn.initializers.zeros, (num_relative_distance, self.config.num_attention_heads)) self.relative_position_index = relative_position_index_init(self.window_size) def __call__(self): index = self.relative_position_index.reshape(-1) shape = (self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1) relative_position_bias = self.relative_position_bias_table[index].reshape(shape) return jnp.transpose(relative_position_bias, (2, 0, 1)) class FlaxBeitSelfAttention(nn.Module): config: BeitConfig window_size: Tuple[int, int] dtype: jnp.dtype = jnp.float32 def setup(self): if self.config.hidden_size % self.config.num_attention_heads != 0 and (not hasattr(self.config, 'embedding_size')): raise ValueError(f'The hidden size {(self.config.hidden_size,)} is not a multiple of the number of attention heads {self.config.num_attention_heads}.') self.query = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.key = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), use_bias=False) self.value = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.relative_position_bias = FlaxBeitRelativePositionBias(self.config, window_size=self.window_size, dtype=self.dtype) if self.window_size else None def __call__(self, hidden_states, relative_position_bias=None, deterministic: bool=True, 
output_attentions: bool=False): head_dim = self.config.hidden_size // self.config.num_attention_heads query_states = self.query(hidden_states).reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)) value_states = self.value(hidden_states).reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)) key_states = self.key(hidden_states).reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)) dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng('dropout') attention_bias = jnp.array(0.0, dtype=self.dtype) if self.relative_position_bias is not None: attention_bias = jnp.expand_dims(self.relative_position_bias(), 0) attention_bias = attention_bias.astype(query_states.dtype) if relative_position_bias is not None: attention_bias = attention_bias + relative_position_bias.astype(attention_bias.dtype) attn_weights = dot_product_attention_weights(query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxBeitSelfOutput(nn.Module): config: BeitConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, deterministic: bool=True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxBeitAttention(nn.Module): config: BeitConfig window_size: Tuple[int, int] dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxBeitSelfAttention(self.config, self.window_size, dtype=self.dtype) self.output = FlaxBeitSelfOutput(self.config, dtype=self.dtype) def __call__(self, hidden_states, relative_position_bias=None, deterministic=True, output_attentions: bool=False): attn_outputs = self.attention(hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions) attn_output = attn_outputs[0] attn_output = self.output(attn_output, deterministic=deterministic) outputs = (attn_output,) if output_attentions: outputs += (attn_outputs[1],) return outputs class FlaxBeitIntermediate(nn.Module): config: BeitConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxBeitOutput(nn.Module): config: BeitConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, deterministic: bool=True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, 
deterministic=deterministic) return hidden_states class FlaxBeitLayer(nn.Module): config: BeitConfig window_size: Tuple[int, int] drop_path_rate: float dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxBeitAttention(self.config, self.window_size, dtype=self.dtype) self.intermediate = FlaxBeitIntermediate(self.config, dtype=self.dtype) self.output = FlaxBeitOutput(self.config, dtype=self.dtype) self.layernorm_before = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.drop_path = FlaxBeitDropPath(rate=self.drop_path_rate) self.layernorm_after = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.init_values = self.config.layer_scale_init_value if self.init_values > 0: self.lambda_1 = self.param('lambda_1', ones_with_scale, self.config.hidden_size, self.init_values) self.lambda_2 = self.param('lambda_2', ones_with_scale, self.config.hidden_size, self.init_values) else: self.lambda_1 = None self.lambda_2 = None def __call__(self, hidden_states, relative_position_bias=None, deterministic: bool=True, output_attentions: bool=False): self_attention_outputs = self.attention(self.layernorm_before(hidden_states), relative_position_bias, deterministic=deterministic, output_attentions=output_attentions) attention_output = self_attention_outputs[0] if self.lambda_1 is not None: attention_output = self.lambda_1.astype(attention_output.dtype) * attention_output hidden_states = self.drop_path(attention_output, deterministic=deterministic) + hidden_states layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.output(layer_output, deterministic=deterministic) if self.lambda_2 is not None: layer_output = self.lambda_2.astype(layer_output.dtype) * layer_output layer_output = self.drop_path(layer_output, deterministic=deterministic) + hidden_states outputs = (layer_output,) if output_attentions: outputs += (self_attention_outputs[1],) return outputs class FlaxBeitLayerCollection(nn.Module): config: BeitConfig window_size: Tuple[int, int] drop_path_rates: List[float] relative_position_bias: Callable[[], jnp.ndarray] dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxBeitLayer(self.config, window_size=self.window_size if self.config.use_relative_position_bias else None, drop_path_rate=self.drop_path_rates[i], name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] def __call__(self, hidden_states, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) relative_position_bias = self.relative_position_bias() if self.relative_position_bias is not None else None layer_outputs = layer(hidden_states, relative_position_bias, deterministic=deterministic, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states,) if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) class FlaxBeitEncoder(nn.Module): config: BeitConfig window_size: Tuple[int, int] dtype: jnp.dtype = jnp.float32 def setup(self): if 
self.config.use_shared_relative_position_bias: self.relative_position_bias = FlaxBeitRelativePositionBias(config=self.config, window_size=self.window_size, dtype=self.dtype) drop_path_rates = list(np.linspace(0, self.config.drop_path_rate, self.config.num_hidden_layers)) self.layer = FlaxBeitLayerCollection(self.config, window_size=self.window_size, drop_path_rates=drop_path_rates, relative_position_bias=self.relative_position_bias if self.config.use_shared_relative_position_bias else None, dtype=self.dtype) def __call__(self, hidden_states, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return self.layer(hidden_states, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class FlaxBeitPreTrainedModel(FlaxPreTrainedModel): config_class = BeitConfig base_model_prefix = 'beit' main_input_name = 'pixel_values' module_class: nn.Module = None def __init__(self, config: BeitConfig, input_shape=None, seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) if input_shape is None: input_shape = (1, config.image_size, config.image_size, config.num_channels) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: pixel_values = jnp.zeros(input_shape, dtype=self.dtype) (params_rng, dropout_rng) = jax.random.split(rng) (dropout_rng, droppath_rng) = jax.random.split(dropout_rng) rngs = {'params': params_rng, 'dropout': dropout_rng, 'droppath': droppath_rng} random_params = self.module.init(rngs, pixel_values, return_dict=False)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(BEIT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def __call__(self, pixel_values, bool_masked_pos=None, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) rngs = {} if dropout_rng is not None: (dropout_rng, droppath_rng) = jax.random.split(dropout_rng) rngs['dropout'] = dropout_rng rngs['droppath'] = droppath_rng return self.module.apply({'params': params or self.params}, jnp.array(pixel_values, dtype=jnp.float32), bool_masked_pos, not train, output_attentions, output_hidden_states, return_dict, rngs=rngs) class FlaxBeitPooler(nn.Module): config: BeitConfig dtype: jnp.dtype = jnp.float32 def setup(self): if self.config.use_mean_pooling: self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states): if self.config.use_mean_pooling: patch_tokens = hidden_states[:, 1:, :] pooled_output = 
self.layernorm(jnp.mean(patch_tokens, axis=1)) else: pooled_output = hidden_states[:, 0] return pooled_output class FlaxBeitModule(nn.Module): config: BeitConfig dtype: jnp.dtype = jnp.float32 add_pooling_layer: bool = True def setup(self): self.embeddings = FlaxBeitEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxBeitEncoder(self.config, window_size=self.embeddings.patch_embeddings.patch_shape, dtype=self.dtype) if not self.config.use_mean_pooling: self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.pooler = FlaxBeitPooler(self.config, dtype=self.dtype) if self.add_pooling_layer else None def __call__(self, pixel_values, bool_masked_pos=None, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): hidden_states = self.embeddings(pixel_values, bool_masked_pos, deterministic=deterministic) outputs = self.encoder(hidden_states, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if not self.config.use_mean_pooling: hidden_states = self.layernorm(hidden_states) pooled = self.pooler(hidden_states) if self.add_pooling_layer else None if not return_dict: if pooled is None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBeitModelOutputWithPooling(last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('The bare Beit Model transformer outputting raw hidden-states without any specific head on top.', BEIT_START_DOCSTRING) class FlaxBeitModel(FlaxBeitPreTrainedModel): module_class = FlaxBeitModule FLAX_BEIT_MODEL_DOCSTRING = '\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import AutoImageProcessor, FlaxBeitModel\n >>> from PIL import Image\n >>> import requests\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")\n >>> model = FlaxBeitModel.from_pretrained("microsoft/beit-base-patch16-224-pt22k-ft22k")\n\n >>> inputs = image_processor(images=image, return_tensors="np")\n >>> outputs = model(**inputs)\n >>> last_hidden_states = outputs.last_hidden_state\n ```\n' overwrite_call_docstring(FlaxBeitModel, FLAX_BEIT_MODEL_DOCSTRING) append_replace_return_docstrings(FlaxBeitModel, output_type=FlaxBeitModelOutputWithPooling, config_class=BeitConfig) class FlaxBeitForMaskedImageModelingModule(nn.Module): config: BeitConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.beit = FlaxBeitModule(self.config, add_pooling_layer=False, dtype=self.dtype) self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.lm_head = nn.Dense(self.config.vocab_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) def __call__(self, pixel_values=None, bool_masked_pos=None, deterministic: bool=True, output_attentions=None, output_hidden_states=None, return_dict=None): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.beit(pixel_values, bool_masked_pos, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = 
self.layernorm(sequence_output) prediction_scores = self.lm_head(sequence_output[:, 1:]) if not return_dict: output = (prediction_scores,) + outputs[2:] return output return FlaxMaskedLMOutput(logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings("Beit Model transformer with a 'language' modeling head on top (to predict visual tokens).", BEIT_START_DOCSTRING) class FlaxBeitForMaskedImageModeling(FlaxBeitPreTrainedModel): module_class = FlaxBeitForMaskedImageModelingModule FLAX_BEIT_MLM_DOCSTRING = '\n bool_masked_pos (`numpy.ndarray` of shape `(batch_size, num_patches)`):\n Boolean masked positions. Indicates which patches are masked (1) and which aren\'t (0).\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import AutoImageProcessor, BeitForMaskedImageModeling\n >>> from PIL import Image\n >>> import requests\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")\n >>> model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")\n\n >>> inputs = image_processor(images=image, return_tensors="np")\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n ```\n' overwrite_call_docstring(FlaxBeitForMaskedImageModeling, FLAX_BEIT_MLM_DOCSTRING) append_replace_return_docstrings(FlaxBeitForMaskedImageModeling, output_type=FlaxMaskedLMOutput, config_class=BeitConfig) class FlaxBeitForImageClassificationModule(nn.Module): config: BeitConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.beit = FlaxBeitModule(config=self.config, dtype=self.dtype, add_pooling_layer=True) self.classifier = nn.Dense(self.config.num_labels, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) def __call__(self, pixel_values=None, bool_masked_pos=None, deterministic: bool=True, output_attentions=None, output_hidden_states=None, return_dict=None): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.beit(pixel_values, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] logits = self.classifier(pooled_output) if not return_dict: output = (logits,) + outputs[2:] return output return FlaxSequenceClassifierOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Beit Model transformer with an image classification head on top (a linear layer on top of the average of the final\n hidden states of the patch tokens) e.g. 
for ImageNet.\n ', BEIT_START_DOCSTRING) class FlaxBeitForImageClassification(FlaxBeitPreTrainedModel): module_class = FlaxBeitForImageClassificationModule FLAX_BEIT_CLASSIF_DOCSTRING = '\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoImageProcessor, FlaxBeitForImageClassification\n >>> from PIL import Image\n >>> import requests\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")\n >>> model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")\n\n >>> inputs = image_processor(images=image, return_tensors="np")\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n >>> # model predicts one of the 1000 ImageNet classes\n >>> predicted_class_idx = logits.argmax(-1).item()\n >>> print("Predicted class:", model.config.id2label[predicted_class_idx])\n ```\n' overwrite_call_docstring(FlaxBeitForImageClassification, FLAX_BEIT_CLASSIF_DOCSTRING) append_replace_return_docstrings(FlaxBeitForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=BeitConfig) # File: transformers-main/src/transformers/models/bert/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_bert': ['BertConfig', 'BertOnnxConfig'], 'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_bert'] = ['BertForMaskedLM', 'BertForMultipleChoice', 'BertForNextSentencePrediction', 'BertForPreTraining', 'BertForQuestionAnswering', 'BertForSequenceClassification', 'BertForTokenClassification', 'BertLayer', 'BertLMHeadModel', 'BertModel', 'BertPreTrainedModel', 'load_tf_weights_in_bert'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_bert'] = ['TFBertEmbeddings', 'TFBertForMaskedLM', 'TFBertForMultipleChoice', 'TFBertForNextSentencePrediction', 'TFBertForPreTraining', 'TFBertForQuestionAnswering', 'TFBertForSequenceClassification', 'TFBertForTokenClassification', 'TFBertLMHeadModel', 'TFBertMainLayer', 'TFBertModel', 'TFBertPreTrainedModel'] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_bert_tf'] = ['TFBertTokenizer'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_bert'] = ['FlaxBertForCausalLM', 'FlaxBertForMaskedLM', 'FlaxBertForMultipleChoice', 'FlaxBertForNextSentencePrediction', 'FlaxBertForPreTraining', 'FlaxBertForQuestionAnswering', 'FlaxBertForSequenceClassification', 'FlaxBertForTokenClassification', 'FlaxBertModel', 'FlaxBertPreTrainedModel'] if TYPE_CHECKING: from .configuration_bert import BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, 
WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/bert/configuration_bert.py """""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class BertConfig(PretrainedConfig): model_type = 'bert' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout class BertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', 
dynamic_axis)]) # File: transformers-main/src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py """""" import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def load_tf2_weights_in_bert(model, tf_checkpoint_path, config): tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] layer_depth = [] for (full_name, shape) in init_vars: name = full_name.split('/') if full_name == '_CHECKPOINTABLE_OBJECT_GRAPH' or name[0] in ['global_step', 'save_counter']: logger.info(f'Skipping non-model layer {full_name}') continue if 'optimizer' in full_name: logger.info(f'Skipping optimization layer {full_name}') continue if name[0] == 'model': name = name[1:] depth = 0 for _name in name: if _name.startswith('layer_with_weights'): depth += 1 else: break layer_depth.append(depth) array = tf.train.load_variable(tf_path, full_name) names.append('/'.join(name)) arrays.append(array) logger.info(f'Read a total of {len(arrays):,} layers') if len(set(layer_depth)) != 1: raise ValueError(f'Found layer names with different depths (layer depth {list(set(layer_depth))})') layer_depth = list(set(layer_depth))[0] if layer_depth != 1: raise ValueError('The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP heads.') logger.info('Converting weights...') for (full_name, array) in zip(names, arrays): name = full_name.split('/') pointer = model trace = [] for (i, m_name) in enumerate(name): if m_name == '.ATTRIBUTES': break if m_name.startswith('layer_with_weights'): layer_num = int(m_name.split('-')[-1]) if layer_num <= 2: continue elif layer_num == 3: trace.extend(['embeddings', 'LayerNorm']) pointer = getattr(pointer, 'embeddings') pointer = getattr(pointer, 'LayerNorm') elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: trace.extend(['encoder', 'layer', str(layer_num - 4)]) pointer = getattr(pointer, 'encoder') pointer = getattr(pointer, 'layer') pointer = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: trace.extend(['pooler', 'dense']) pointer = getattr(pointer, 'pooler') pointer = getattr(pointer, 'dense') elif m_name == 'embeddings': trace.append('embeddings') pointer = getattr(pointer, 'embeddings') if layer_num == 0: trace.append('word_embeddings') pointer = getattr(pointer, 'word_embeddings') elif layer_num == 1: trace.append('position_embeddings') pointer = getattr(pointer, 'position_embeddings') elif layer_num == 2: trace.append('token_type_embeddings') pointer = getattr(pointer, 'token_type_embeddings') else: raise ValueError(f'Unknown embedding layer with name {full_name}') trace.append('weight') pointer = getattr(pointer, 'weight') elif m_name == '_attention_layer': trace.extend(['attention', 'self']) pointer = getattr(pointer, 'attention') pointer = getattr(pointer, 'self') elif m_name == '_attention_layer_norm': trace.extend(['attention', 'output', 'LayerNorm']) pointer = getattr(pointer, 'attention') pointer = getattr(pointer, 'output') pointer = getattr(pointer, 'LayerNorm') elif m_name == '_attention_output_dense': trace.extend(['attention', 'output', 'dense']) pointer = getattr(pointer, 'attention') pointer = getattr(pointer, 'output') pointer = getattr(pointer, 'dense') elif m_name == '_output_dense': 
trace.extend(['output', 'dense']) pointer = getattr(pointer, 'output') pointer = getattr(pointer, 'dense') elif m_name == '_output_layer_norm': trace.extend(['output', 'LayerNorm']) pointer = getattr(pointer, 'output') pointer = getattr(pointer, 'LayerNorm') elif m_name == '_key_dense': trace.append('key') pointer = getattr(pointer, 'key') elif m_name == '_query_dense': trace.append('query') pointer = getattr(pointer, 'query') elif m_name == '_value_dense': trace.append('value') pointer = getattr(pointer, 'value') elif m_name == '_intermediate_dense': trace.extend(['intermediate', 'dense']) pointer = getattr(pointer, 'intermediate') pointer = getattr(pointer, 'dense') elif m_name == '_output_layer_norm': trace.append('output') pointer = getattr(pointer, 'output') elif m_name in ['bias', 'beta']: trace.append('bias') pointer = getattr(pointer, 'bias') elif m_name in ['kernel', 'gamma']: trace.append('weight') pointer = getattr(pointer, 'weight') else: logger.warning(f'Ignored {m_name}') trace = '.'.join(trace) if re.match('(\\S+)\\.attention\\.self\\.(key|value|query)\\.(bias|weight)', trace) or re.match('(\\S+)\\.attention\\.output\\.dense\\.weight', trace): array = array.reshape(pointer.data.shape) if 'kernel' in full_name: array = array.transpose() if pointer.shape == array.shape: pointer.data = torch.from_numpy(array) else: raise ValueError(f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape: {array.shape}') logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}') return model def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path): logger.info(f'Loading model based on config from {config_path}...') config = BertConfig.from_json_file(config_path) model = BertModel(config) logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...') load_tf2_weights_in_bert(model, tf_checkpoint_path, config) logger.info(f'Saving PyTorch model to {pytorch_dump_path}...') torch.save(model.state_dict(), pytorch_dump_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.') parser.add_argument('--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. 
This specifies the model architecture.') parser.add_argument('--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).') args = parser.parse_args() convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) # File: transformers-main/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py """""" import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path): config = BertConfig.from_json_file(bert_config_file) print(f'Building PyTorch model from configuration: {config}') model = BertForPreTraining(config) load_tf_weights_in_bert(model, config, tf_checkpoint_path) print(f'Save PyTorch model to {pytorch_dump_path}') torch.save(model.state_dict(), pytorch_dump_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.') parser.add_argument('--bert_config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained BERT model. \nThis specifies the model architecture.') parser.add_argument('--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) # File: transformers-main/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py """""" import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str): tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value') var_map = (('layer.', 'layer_'), ('word_embeddings.weight', 'word_embeddings'), ('position_embeddings.weight', 'position_embeddings'), ('token_type_embeddings.weight', 'token_type_embeddings'), ('.', '/'), ('LayerNorm/weight', 'LayerNorm/gamma'), ('LayerNorm/bias', 'LayerNorm/beta'), ('weight', 'kernel')) if not os.path.isdir(ckpt_dir): os.makedirs(ckpt_dir) state_dict = model.state_dict() def to_tf_var_name(name: str): for (patt, repl) in iter(var_map): name = name.replace(patt, repl) return f'bert/{name}' def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session): tf_dtype = tf.dtypes.as_dtype(tensor.dtype) tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer()) session.run(tf.variables_initializer([tf_var])) session.run(tf_var) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: tf_name = to_tf_var_name(var_name) torch_tensor = state_dict[var_name].numpy() if any((x in var_name for x in tensors_to_transpose)): torch_tensor = torch_tensor.T tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session) tf_var.assign(tf.cast(torch_tensor, tf_var.dtype)) tf_weight = session.run(tf_var) print(f'Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}') saver = tf.train.Saver(tf.trainable_variables()) saver.save(session, os.path.join(ckpt_dir, model_name.replace('-', '_') + '.ckpt')) def main(raw_args=None): parser = 
argparse.ArgumentParser() parser.add_argument('--model_name', type=str, required=True, help='model name e.g. google-bert/bert-base-uncased') parser.add_argument('--cache_dir', type=str, default=None, required=False, help='Directory containing pytorch model') parser.add_argument('--pytorch_model_path', type=str, required=True, help='/path/to/.bin') parser.add_argument('--tf_cache_dir', type=str, required=True, help='Directory in which to save tensorflow model') args = parser.parse_args(raw_args) model = BertModel.from_pretrained(pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir) convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name) if __name__ == '__main__': main() # File: transformers-main/src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py """""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput from transformers.utils import logging logging.set_verbosity_info() def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str): def get_masked_lm_array(name: str): full_name = f'masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE' array = tf.train.load_variable(tf_checkpoint_path, full_name) if 'kernel' in name: array = array.transpose() return torch.from_numpy(array) def get_encoder_array(name: str): full_name = f'encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE' array = tf.train.load_variable(tf_checkpoint_path, full_name) if 'kernel' in name: array = array.transpose() return torch.from_numpy(array) def get_encoder_layer_array(layer_index: int, name: str): full_name = f'encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE' array = tf.train.load_variable(tf_checkpoint_path, full_name) if 'kernel' in name: array = array.transpose() return torch.from_numpy(array) def get_encoder_attention_layer_array(layer_index: int, name: str, orginal_shape): full_name = f'encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE' array = tf.train.load_variable(tf_checkpoint_path, full_name) array = array.reshape(orginal_shape) if 'kernel' in name: array = array.transpose() return torch.from_numpy(array) print(f'Loading model based on config from {config_path}...') config = BertConfig.from_json_file(config_path) model = BertForMaskedLM(config) for layer_index in range(0, config.num_hidden_layers): layer: BertLayer = model.bert.encoder.layer[layer_index] self_attn: BertSelfAttention = layer.attention.self self_attn.query.weight.data = get_encoder_attention_layer_array(layer_index, '_query_dense/kernel', self_attn.query.weight.data.shape) self_attn.query.bias.data = get_encoder_attention_layer_array(layer_index, '_query_dense/bias', self_attn.query.bias.data.shape) self_attn.key.weight.data = get_encoder_attention_layer_array(layer_index, '_key_dense/kernel', self_attn.key.weight.data.shape) self_attn.key.bias.data = get_encoder_attention_layer_array(layer_index, '_key_dense/bias', self_attn.key.bias.data.shape) self_attn.value.weight.data = get_encoder_attention_layer_array(layer_index, '_value_dense/kernel', self_attn.value.weight.data.shape) self_attn.value.bias.data = get_encoder_attention_layer_array(layer_index, '_value_dense/bias', self_attn.value.bias.data.shape) self_output: 
BertSelfOutput = layer.attention.output self_output.dense.weight.data = get_encoder_attention_layer_array(layer_index, '_output_dense/kernel', self_output.dense.weight.data.shape) self_output.dense.bias.data = get_encoder_attention_layer_array(layer_index, '_output_dense/bias', self_output.dense.bias.data.shape) self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, '_attention_layer_norm/gamma') self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, '_attention_layer_norm/beta') intermediate: BertIntermediate = layer.intermediate intermediate.dense.weight.data = get_encoder_layer_array(layer_index, '_intermediate_dense/kernel') intermediate.dense.bias.data = get_encoder_layer_array(layer_index, '_intermediate_dense/bias') bert_output: BertOutput = layer.output bert_output.dense.weight.data = get_encoder_layer_array(layer_index, '_output_dense/kernel') bert_output.dense.bias.data = get_encoder_layer_array(layer_index, '_output_dense/bias') bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, '_output_layer_norm/gamma') bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, '_output_layer_norm/beta') model.bert.embeddings.position_embeddings.weight.data = get_encoder_array('_position_embedding_layer/embeddings') model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array('_type_embedding_layer/embeddings') model.bert.embeddings.LayerNorm.weight.data = get_encoder_array('_embedding_norm_layer/gamma') model.bert.embeddings.LayerNorm.bias.data = get_encoder_array('_embedding_norm_layer/beta') lm_head = model.cls.predictions.transform lm_head.dense.weight.data = get_masked_lm_array('dense/kernel') lm_head.dense.bias.data = get_masked_lm_array('dense/bias') lm_head.LayerNorm.weight.data = get_masked_lm_array('layer_norm/gamma') lm_head.LayerNorm.bias.data = get_masked_lm_array('layer_norm/beta') model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array('embedding_table') model.bert.pooler = BertPooler(config=config) model.bert.pooler.dense.weight.data = get_encoder_array('_pooler_layer/kernel') model.bert.pooler.dense.bias.data = get_encoder_array('_pooler_layer/bias') model.save_pretrained(pytorch_dump_path) new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path) print(new_model.eval()) print('Model conversion was done successfully!') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.') parser.add_argument('--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model.
This specifies the model architecture.') parser.add_argument('--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model.') args = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) # File: transformers-main/src/transformers/models/bert/modeling_bert.py """""" import math import os import warnings from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, get_torch_version, logging, replace_return_docstrings from .configuration_bert import BertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google-bert/bert-base-uncased' _CONFIG_FOR_DOC = 'BertConfig' _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = 'dbmdz/bert-large-cased-finetuned-conll03-english' _TOKEN_CLASS_EXPECTED_OUTPUT = "['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC'] " _TOKEN_CLASS_EXPECTED_LOSS = 0.01 _CHECKPOINT_FOR_QA = 'deepset/bert-base-cased-squad2' _QA_EXPECTED_OUTPUT = "'a nice puppet'" _QA_EXPECTED_LOSS = 7.41 _QA_TARGET_START_INDEX = 14 _QA_TARGET_END_INDEX = 15 _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = 'textattack/bert-base-uncased-yelp-polarity' _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'" _SEQ_CLASS_EXPECTED_LOSS = 0.01 def load_tf_weights_in_bert(model, config, tf_checkpoint_path): try: import re import numpy as np import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}') array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for (name, array) in zip(names, arrays): name = name.split('/') if any((n in ['adam_v', 'adam_m', 'AdamWeightDecayOptimizer', 'AdamWeightDecayOptimizer_1', 'global_step'] for n in name)): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch('[A-Za-z]+_\\d+', m_name): scope_names = re.split('_(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'kernel' or scope_names[0] == 'gamma': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': pointer = getattr(pointer, 'bias') elif scope_names[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched') except ValueError as e: e.args += (pointer.shape, array.shape) raise logger.info(f'Initialize PyTorch weight {name}') pointer.data = torch.from_numpy(array) return model class BertEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = 
self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = 
torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BertSdpaSelfAttention(BertSelfAttention): def __init__(self, config, position_embedding_type=None): super().__init__(config, position_embedding_type=position_embedding_type) self.dropout_prob = config.attention_probs_dropout_prob self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse('2.2.0') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: if self.position_embedding_type != 'absolute' or output_attentions or head_mask is not None: logger.warning_once('BertSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) (bsz, tgt_len, _) = hidden_states.size() query_layer = self.transpose_for_scores(self.query(hidden_states)) is_cross_attention = encoder_hidden_states is not None current_states = encoder_hidden_states if is_cross_attention else hidden_states attention_mask = encoder_attention_mask if is_cross_attention else attention_mask if is_cross_attention and past_key_value and (past_key_value[0].shape[2] == current_states.shape[1]): (key_layer, value_layer) = past_key_value else: key_layer = self.transpose_for_scores(self.key(current_states)) value_layer = self.transpose_for_scores(self.value(current_states)) if past_key_value is not None and (not is_cross_attention): key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) if self.is_decoder: past_key_value = (key_layer, value_layer) if self.require_contiguous_qkv and query_layer.device.type == 'cuda' and (attention_mask is not None): query_layer = query_layer.contiguous() key_layer = key_layer.contiguous() value_layer = value_layer.contiguous() is_causal = True if self.is_decoder and (not is_cross_attention) and (attention_mask is None) and (tgt_len > 1) else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=self.dropout_prob if self.training else 0.0, is_causal=is_causal) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.all_head_size) outputs = (attn_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states BERT_SELF_ATTENTION_CLASSES = {'eager': BertSelfAttention, 'sdpa': BertSdpaSelfAttention} class BertAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = BERT_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: 
Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BertAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = BertAttention(config, position_embedding_type='absolute') self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) 
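# Cache layout note: when this layer runs as a decoder with cross-attention, the per-layer
# `past_key_value` tuple holds four tensors: self-attention key/value states in positions 0-1
# (sliced above as `past_key_value[:2]`) and cross-attention key/value states in positions 2-3
# (sliced above as `past_key_value[-2:]`). The last element of `cross_attention_outputs` is the
# updated cross-attention cache, which is appended to `present_key_value` below.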
attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score 
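# --- Added illustrative sketch (not part of the original file) ---------------
# The SDPA branch in BertSdpaSelfAttention above defers to
# torch.nn.functional.scaled_dot_product_attention, which is numerically
# equivalent to the eager softmax(Q @ K^T / sqrt(d)) @ V computation for 4D
# (batch, num_heads, seq_len, head_dim) tensors such as those produced by
# transpose_for_scores. The helper name `_sdpa_equivalence_sketch` is
# hypothetical and exists only to illustrate that equivalence.
def _sdpa_equivalence_sketch():
    import math

    import torch
    import torch.nn.functional as F

    # Random query/key/value tensors shaped like BERT-base attention inputs.
    q, k, v = (torch.randn(2, 12, 5, 64) for _ in range(3))
    # Eager path: explicit score matrix, softmax over the key dimension.
    scores = q @ k.transpose(-1, -2) / math.sqrt(q.size(-1))
    eager = torch.softmax(scores, dim=-1) @ v
    # Fused path used when config._attn_implementation == "sdpa".
    sdpa = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
    torch.testing.assert_close(eager, sdpa, rtol=1e-5, atol=1e-5)
# ------------------------------------------------------------------------------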
class BertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return (prediction_scores, seq_relationship_score) class BertPreTrainedModel(PreTrainedModel): config_class = BertConfig load_tf_weights = load_tf_weights_in_bert base_model_prefix = 'bert' supports_gradient_checkpointing = True _supports_sdpa = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class BertForPreTrainingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None seq_relationship_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None BERT_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`BertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`or `(batch_size, sequence_length, target_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare Bert Model transformer outputting raw hidden-states without any specific head on top.', BERT_START_DOCSTRING) class BertModel(BertPreTrainedModel): _no_split_modules = ['BertEmbeddings', 'BertLayer'] def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.attn_implementation = config._attn_implementation self.position_embedding_type = config.position_embedding_type self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both 
input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) use_sdpa_attention_masks = self.attn_implementation == 'sdpa' and self.position_embedding_type == 'absolute' and (head_mask is None) and (not output_attentions) if use_sdpa_attention_masks and attention_mask.dim() == 2: if self.config.is_decoder: extended_attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, embedding_output, past_key_values_length) else: extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, embedding_output.dtype, tgt_len=seq_length) else: extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) if use_sdpa_attention_masks and encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, embedding_output.dtype, tgt_len=seq_length) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('\n Bert Model with two heads on top as done during the pretraining: a `masked language modeling` 
head and a `next\n sentence prediction (classification)` head.\n ', BERT_START_DOCSTRING) class BertForPreTraining(BertPreTrainedModel): _tied_weights_keys = ['predictions.decoder.bias', 'cls.predictions.decoder.weight'] def __init__(self, config): super().__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, next_sentence_label: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BertForPreTrainingOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) (sequence_output, pooled_output) = outputs[:2] (prediction_scores, seq_relationship_score) = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return BertForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('Bert Model with a `language modeling` head on top for CLM fine-tuning.', BERT_START_DOCSTRING) class BertLMHeadModel(BertPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder.bias', 'cls.predictions.decoder.weight'] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning('If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`') self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, 
attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.Tensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=True, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past @add_start_docstrings('Bert Model with a `language modeling` head on top.', BERT_START_DOCSTRING) class BertForMaskedLM(BertPreTrainedModel): _tied_weights_keys = ['predictions.decoder.bias', 'cls.predictions.decoder.weight'] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning('If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.') self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias 
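# Note (added for clarity): get_output_embeddings/set_output_embeddings are the
# hooks used by PreTrainedModel.tie_weights and resize_token_embeddings. With
# the default config.tie_word_embeddings=True, the MLM decoder weight is shared
# with the input word embeddings, and the bias is re-pointed alongside it so
# both stay in sync (see the _tied_weights_keys declared on this class).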
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, expected_output="'paris'", expected_loss=0.88) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] if self.config.pad_token_id is None: raise ValueError('The PAD token should be defined for generation') attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full((effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {'input_ids': input_ids, 'attention_mask': attention_mask} @add_start_docstrings('Bert Model with a `next sentence prediction (classification)` head on top.', BERT_START_DOCSTRING) class BertForNextSentencePrediction(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]: if 'next_sentence_label' in kwargs: warnings.warn('The `next_sentence_label` argument is deprecated and will 
be removed in a future version, use `labels` instead.', FutureWarning) labels = kwargs.pop('next_sentence_label') return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return (next_sentence_loss,) + output if next_sentence_loss is not None else output return NextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', BERT_START_DOCSTRING) class BertForSequenceClassification(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = BertModel(config) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': 
loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n ', BERT_START_DOCSTRING) class BertForMultipleChoice(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BertModel(config) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', BERT_START_DOCSTRING) class BertForTokenClassification(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config, add_pooling_layer=False) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, expected_loss=_TOKEN_CLASS_EXPECTED_LOSS) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', BERT_START_DOCSTRING) class BertForQuestionAnswering(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_QA, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=_QA_TARGET_START_INDEX, qa_target_end_index=_QA_TARGET_END_INDEX, expected_output=_QA_EXPECTED_OUTPUT, expected_loss=_QA_EXPECTED_LOSS) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: 
Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/bert/modeling_flax_bert.py from typing import Callable, Optional, Tuple import flax import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxBaseModelOutputWithPooling, FlaxBaseModelOutputWithPoolingAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxMaskedLMOutput, FlaxMultipleChoiceModelOutput, FlaxNextSentencePredictorOutput, FlaxQuestionAnsweringModelOutput, FlaxSequenceClassifierOutput, FlaxTokenClassifierOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_bert import BertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google-bert/bert-base-uncased' _CONFIG_FOR_DOC = 'BertConfig' remat = nn_partitioning.remat @flax.struct.dataclass class FlaxBertForPreTrainingOutput(ModelOutput): prediction_logits: jnp.ndarray = None seq_relationship_logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None BERT_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading, saving and converting weights from PyTorch models)\n\n This model is also a\n [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as\n a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and\n behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`BertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n\n' BERT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`numpy.ndarray` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n head_mask (`numpy.ndarray` of shape `({0})`, `optional):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\n' class FlaxBertEmbeddings(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.word_embeddings = nn.Embed(self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype) self.position_embeddings = nn.Embed(self.config.max_position_embeddings, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype) self.token_type_embeddings = nn.Embed(self.config.type_vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool=True): inputs_embeds = self.word_embeddings(input_ids.astype('i4')) position_embeds = self.position_embeddings(position_ids.astype('i4')) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype('i4')) hidden_states = inputs_embeds + token_type_embeddings + position_embeds hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxBertSelfAttention(nn.Module): config: BertConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.head_dim = self.config.hidden_size // self.config.num_attention_heads if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError('`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` : {self.config.num_attention_heads}') self.query = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.key = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.value = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) if self.causal: self.causal_mask = make_causal_mask(jnp.ones((1, self.config.max_position_embeddings), dtype='bool'), dtype='bool') def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') cached_key = self.variable('cache', 'cached_key', jnp.zeros, key.shape, key.dtype) 
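# Note (added for clarity): cached_key above and cached_value/cache_index below
# are mutable 'cache' collection variables that persist the keys/values for all
# positions decoded so far; cache_index tracks the next write offset. Each call
# writes the new key/value slice in place with lax.dynamic_update_slice, and the
# pad mask restricts attention to the positions that have already been filled.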
cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape, value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, dtype=jnp.int32)) if is_initialized: (*batch_dims, max_length, num_heads, depth_per_head) = cached_key.value.shape cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors pad_mask = jnp.broadcast_to(jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) def __call__(self, hidden_states, attention_mask, layer_head_mask, key_value_states: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic=True, output_attentions: bool=False): is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] query_states = self.query(hidden_states) if is_cross_attention: key_states = self.key(key_value_states) value_states = self.value(key_value_states) else: key_states = self.key(hidden_states) value_states = self.value(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) if self.causal: (query_length, key_length) = (query_states.shape[1], key_states.shape[1]) if self.has_variable('cache', 'cached_key'): mask_shift = self.variables['cache']['cache_index'] max_decoder_length = self.variables['cache']['cached_key'].shape[1] causal_mask = lax.dynamic_slice(self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) if self.causal and (self.has_variable('cache', 'cached_key') or init_cache): (key_states, value_states, attention_mask) = self._concatenate_to_cache(key_states, value_states, query_states, attention_mask) if attention_mask is not None: attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng('dropout') attn_weights = dot_product_attention_weights(query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None) if layer_head_mask is not None: attn_weights = jnp.einsum('...hqk,h->...hqk', attn_weights, layer_head_mask) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if 
output_attentions else (attn_output,) return outputs class FlaxBertSelfOutput(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool=True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class FlaxBertAttention(nn.Module): config: BertConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.self = FlaxBertSelfAttention(self.config, causal=self.causal, dtype=self.dtype) self.output = FlaxBertSelfOutput(self.config, dtype=self.dtype) def __call__(self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool=False): attn_outputs = self.self(hidden_states, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs class FlaxBertIntermediate(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxBertOutput(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_output, deterministic: bool=True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + attention_output) return hidden_states class FlaxBertLayer(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxBertAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype) self.intermediate = FlaxBertIntermediate(self.config, dtype=self.dtype) self.output = FlaxBertOutput(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = FlaxBertAttention(self.config, causal=False, dtype=self.dtype) def __call__(self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False): attention_outputs = self.attention(hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, 
output_attentions=output_attentions) attention_output = attention_outputs[0] if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention(attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions) attention_output = cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs class FlaxBertLayerCollection(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxBertCheckpointLayer = remat(FlaxBertLayer, static_argnums=(5, 6, 7)) self.layers = [FlaxBertCheckpointLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] else: self.layers = [FlaxBertLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] def __call__(self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None if head_mask is not None: if head_mask.shape[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.shape[0]}.') for (i, layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer(hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, encoder_hidden_states, encoder_attention_mask, init_cache, deterministic, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) class FlaxBertEncoder(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.layer = FlaxBertLayerCollection(self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) def __call__(self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return self.layer(hidden_states, attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, 
deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class FlaxBertPooler(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) def __call__(self, hidden_states): cls_hidden_state = hidden_states[:, 0] cls_hidden_state = self.dense(cls_hidden_state) return nn.tanh(cls_hidden_state) class FlaxBertPredictionHeadTransform(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return self.LayerNorm(hidden_states) class FlaxBertLMPredictionHead(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.transform = FlaxBertPredictionHeadTransform(self.config, dtype=self.dtype) self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False) self.bias = self.param('bias', self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.transform(hidden_states) if shared_embedding is not None: hidden_states = self.decoder.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) bias = jnp.asarray(self.bias, self.dtype) hidden_states += bias return hidden_states class FlaxBertOnlyMLMHead(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding) return hidden_states class FlaxBertOnlyNSPHead(nn.Module): dtype: jnp.dtype = jnp.float32 def setup(self): self.seq_relationship = nn.Dense(2, dtype=self.dtype) def __call__(self, pooled_output): return self.seq_relationship(pooled_output) class FlaxBertPreTrainingHeads(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.predictions = FlaxBertLMPredictionHead(self.config, dtype=self.dtype) self.seq_relationship = nn.Dense(2, dtype=self.dtype) def __call__(self, hidden_states, pooled_output, shared_embedding=None): prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding) seq_relationship_score = self.seq_relationship(pooled_output) return (prediction_scores, seq_relationship_score) class FlaxBertPreTrainedModel(FlaxPreTrainedModel): config_class = BertConfig base_model_prefix = 'bert' module_class: nn.Module = None def __init__(self, config: BertConfig, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, gradient_checkpointing: bool=False, **kwargs): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def enable_gradient_checkpointing(self): self._module = self.module_class(config=self.config, dtype=self.dtype, gradient_checkpointing=True) def init_weights(self, rng: 
jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') token_type_ids = jnp.zeros_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init(rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False) else: module_init_outputs = self.module.init(rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False) random_params = module_init_outputs['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): input_ids = jnp.ones((batch_size, max_length), dtype='i4') attention_mask = jnp.ones_like(input_ids, dtype='i4') position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True) return unfreeze(init_variables['cache']) @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, past_key_values: dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if self.config.add_cross_attention: if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False outputs = self.module.apply(inputs, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), token_type_ids=jnp.array(token_type_ids, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), head_mask=jnp.array(head_mask, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=not train, 
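# When past_key_values is supplied here, the cache collection is passed in through inputs['cache']
# and marked mutable, so module.apply returns the updated key/value cache as a second element;
# it is unfrozen and re-exposed to the caller as past_key_values just below.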
output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, mutable=mutable) if past_key_values is not None and return_dict: (outputs, past_key_values) = outputs outputs['past_key_values'] = unfreeze(past_key_values['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past_key_values) = outputs outputs = outputs[:1] + (unfreeze(past_key_values['cache']),) + outputs[1:] else: outputs = self.module.apply(inputs, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), token_type_ids=jnp.array(token_type_ids, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), head_mask=jnp.array(head_mask, dtype='i4'), deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs) return outputs class FlaxBertModule(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 add_pooling_layer: bool = True gradient_checkpointing: bool = False def setup(self): self.embeddings = FlaxBertEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxBertEncoder(self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.pooler = FlaxBertPooler(self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, head_mask: Optional[jnp.ndarray]=None, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) hidden_states = self.embeddings(input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic) outputs = self.encoder(hidden_states, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] pooled = self.pooler(hidden_states) if self.add_pooling_layer else None if not return_dict: if pooled is None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) @add_start_docstrings('The bare Bert Model transformer outputting raw hidden-states without any specific head on top.', BERT_START_DOCSTRING) class FlaxBertModel(FlaxBertPreTrainedModel): module_class = FlaxBertModule append_call_sample_docstring(FlaxBertModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC) class FlaxBertForPreTrainingModule(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBertModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = FlaxBertPreTrainingHeads(config=self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, 
deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if self.config.tie_word_embeddings: shared_embedding = self.bert.variables['params']['embeddings']['word_embeddings']['embedding'] else: shared_embedding = None hidden_states = outputs[0] pooled_output = outputs[1] (prediction_scores, seq_relationship_score) = self.cls(hidden_states, pooled_output, shared_embedding=shared_embedding) if not return_dict: return (prediction_scores, seq_relationship_score) + outputs[2:] return FlaxBertForPreTrainingOutput(prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n ', BERT_START_DOCSTRING) class FlaxBertForPreTraining(FlaxBertPreTrainedModel): module_class = FlaxBertForPreTrainingModule FLAX_BERT_FOR_PRETRAINING_DOCSTRING = '\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, FlaxBertForPreTraining\n\n >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")\n >>> model = FlaxBertForPreTraining.from_pretrained("google-bert/bert-base-uncased")\n\n >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> seq_relationship_logits = outputs.seq_relationship_logits\n ```\n' overwrite_call_docstring(FlaxBertForPreTraining, BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length') + FLAX_BERT_FOR_PRETRAINING_DOCSTRING) append_replace_return_docstrings(FlaxBertForPreTraining, output_type=FlaxBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) class FlaxBertForMaskedLMModule(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBertModule(config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.bert.variables['params']['embeddings']['word_embeddings']['embedding'] else: shared_embedding = None logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxMaskedLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('Bert Model with a `language modeling` head on top.', BERT_START_DOCSTRING) class FlaxBertForMaskedLM(FlaxBertPreTrainedModel): module_class = FlaxBertForMaskedLMModule append_call_sample_docstring(FlaxBertForMaskedLM, _CHECKPOINT_FOR_DOC, 
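# Both the pretraining and masked-LM heads above tie the output projection to the input word
# embeddings when config.tie_word_embeddings is set: the embedding matrix is fetched from
# self.bert.variables and handed to FlaxBertLMPredictionHead, which uses its transpose as the
# decoder kernel (see the shared_embedding branch in that head).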
FlaxMaskedLMOutput, _CONFIG_FOR_DOC) class FlaxBertForNextSentencePredictionModule(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBertModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = FlaxBertOnlyNSPHead(dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return_dict = return_dict if return_dict is not None else self.config.return_dict outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) if not return_dict: return (seq_relationship_scores,) + outputs[2:] return FlaxNextSentencePredictorOutput(logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('Bert Model with a `next sentence prediction (classification)` head on top.', BERT_START_DOCSTRING) class FlaxBertForNextSentencePrediction(FlaxBertPreTrainedModel): module_class = FlaxBertForNextSentencePredictionModule FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING = '\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, FlaxBertForNextSentencePrediction\n\n >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")\n >>> model = FlaxBertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased")\n\n >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."\n >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."\n >>> encoding = tokenizer(prompt, next_sentence, return_tensors="jax")\n\n >>> outputs = model(**encoding)\n >>> logits = outputs.logits\n >>> assert logits[0, 0] < logits[0, 1] # next sentence was random\n ```\n' overwrite_call_docstring(FlaxBertForNextSentencePrediction, BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length') + FLAX_BERT_FOR_NEXT_SENT_PRED_DOCSTRING) append_replace_return_docstrings(FlaxBertForNextSentencePrediction, output_type=FlaxNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) class FlaxBertForSequenceClassificationModule(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBertModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) classifier_dropout = self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) 
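# Sequence classification head: the pooled [CLS] representation from outputs[1], of shape
# (batch, hidden_size), goes through classifier dropout (config.classifier_dropout, falling back to
# hidden_dropout_prob) and a Dense layer, giving logits of shape (batch, num_labels).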
if not return_dict: return (logits,) + outputs[2:] return FlaxSequenceClassifierOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', BERT_START_DOCSTRING) class FlaxBertForSequenceClassification(FlaxBertPreTrainedModel): module_class = FlaxBertForSequenceClassificationModule append_call_sample_docstring(FlaxBertForSequenceClassification, _CHECKPOINT_FOR_DOC, FlaxSequenceClassifierOutput, _CONFIG_FOR_DOC) class FlaxBertForMultipleChoiceModule(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBertModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput(logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', BERT_START_DOCSTRING) class FlaxBertForMultipleChoice(FlaxBertPreTrainedModel): module_class = FlaxBertForMultipleChoiceModule overwrite_call_docstring(FlaxBertForMultipleChoice, BERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) append_call_sample_docstring(FlaxBertForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC) class FlaxBertForTokenClassificationModule(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing) classifier_dropout = self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', BERT_START_DOCSTRING) class FlaxBertForTokenClassification(FlaxBertPreTrainedModel): module_class = FlaxBertForTokenClassificationModule append_call_sample_docstring(FlaxBertForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC) class FlaxBertForQuestionAnsweringModule(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBertModule(config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] logits = self.qa_outputs(hidden_states) (start_logits, end_logits) = jnp.split(logits, self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput(start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', BERT_START_DOCSTRING) class FlaxBertForQuestionAnswering(FlaxBertPreTrainedModel): module_class = FlaxBertForQuestionAnsweringModule append_call_sample_docstring(FlaxBertForQuestionAnswering, _CHECKPOINT_FOR_DOC, FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC) class FlaxBertForCausalLMModule(nn.Module): config: BertConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBertModule(config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = FlaxBertOnlyMLMHead(config=self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, position_ids, token_type_ids: Optional[jnp.ndarray]=None, head_mask: Optional[jnp.ndarray]=None, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.bert.variables['params']['embeddings']['word_embeddings']['embedding'] else: shared_embedding = None logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions(logits=logits, hidden_states=outputs.hidden_states, 
attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) @add_start_docstrings('\n Bert Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g for\n autoregressive tasks.\n ', BERT_START_DOCSTRING) class FlaxBertForCausalLM(FlaxBertPreTrainedModel): module_class = FlaxBertForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array]=None): (batch_size, seq_length) = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'attention_mask': extended_attention_mask, 'position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['position_ids'] = model_kwargs['position_ids'][:, -1:] + 1 return model_kwargs append_call_sample_docstring(FlaxBertForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC) # File: transformers-main/src/transformers/models/bert/modeling_tf_bert.py """""" from __future__ import annotations import math import warnings from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPoolingAndCrossAttentions, TFCausalLMOutputWithCrossAttentions, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFNextSentencePredictorOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFNextSentencePredictionLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_bert import BertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google-bert/bert-base-uncased' _CONFIG_FOR_DOC = 'BertConfig' _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = 'dbmdz/bert-large-cased-finetuned-conll03-english' _TOKEN_CLASS_EXPECTED_OUTPUT = "['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC'] " _TOKEN_CLASS_EXPECTED_LOSS = 0.01 _CHECKPOINT_FOR_QA = 'ydshieh/bert-base-cased-squad2' _QA_EXPECTED_OUTPUT = "'a nice puppet'" _QA_EXPECTED_LOSS = 7.41 _QA_TARGET_START_INDEX = 14 _QA_TARGET_END_INDEX = 15 _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = 'ydshieh/bert-base-uncased-yelp-polarity' _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'" _SEQ_CLASS_EXPECTED_LOSS = 0.01 class TFBertPreTrainingLoss: def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True, 
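# The pretraining loss below sums a masked-LM term and a next-sentence term. Labels equal to -100
# are excluded: tf.nn.relu clamps them to a valid index so the per-token loss can be computed, and a
# mask built from (labels != -100) zeroes their contribution before each loss is averaged over the
# unmasked positions.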
reduction=keras.losses.Reduction.NONE) unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels['labels']), y_pred=logits[0]) lm_loss_mask = tf.cast(labels['labels'] != -100, dtype=unmasked_lm_losses.dtype) masked_lm_losses = unmasked_lm_losses * lm_loss_mask reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask) unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels['next_sentence_label']), y_pred=logits[1]) ns_loss_mask = tf.cast(labels['next_sentence_label'] != -100, dtype=unmasked_ns_loss.dtype) masked_ns_loss = unmasked_ns_loss * ns_loss_mask reduced_masked_ns_loss = tf.reduce_sum(masked_ns_loss) / tf.reduce_sum(ns_loss_mask) return tf.reshape(reduced_masked_lm_loss + reduced_masked_ns_loss, (1,)) class TFBertEmbeddings(keras.layers.Layer): def __init__(self, config: BertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope('word_embeddings'): self.weight = self.add_weight(name='weight', shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)) with tf.name_scope('token_type_embeddings'): self.token_type_embeddings = self.add_weight(name='embeddings', shape=[self.config.type_vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)) with tf.name_scope('position_embeddings'): self.position_embeddings = self.add_weight(name='embeddings', shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range)) if self.built: return self.built = True if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, token_type_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None, past_key_values_length=0, training: bool=False) -> tf.Tensor: if input_ids is None and inputs_embeds is None: raise ValueError('Need to provide either `input_ids` or `input_embeds`.') if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFBertSelfAttention(keras.layers.Layer): def __init__(self, config: BertConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = 
config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='query') self.key = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='key') self.value = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='value') self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: attention_scores = tf.add(attention_scores, attention_mask) attention_probs = stable_softmax(logits=attention_scores, axis=-1) attention_probs = self.dropout(inputs=attention_probs, training=training) if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'query', None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, 
self.config.hidden_size]) if getattr(self, 'key', None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, 'value', None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) class TFBertSelfOutput(keras.layers.Layer): def __init__(self, config: BertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFBertAttention(keras.layers.Layer): def __init__(self, config: BertConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFBertSelfAttention(config, name='self') self.dense_output = TFBertSelfOutput(config, name='output') def prune_heads(self, heads): raise NotImplementedError def call(self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: self_outputs = self.self_attention(hidden_states=input_tensor, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training) attention_output = self.dense_output(hidden_states=self_outputs[0], input_tensor=input_tensor, training=training) outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attention', None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, 'dense_output', None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) class TFBertIntermediate(keras.layers.Layer): def __init__(self, config: BertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): 
self.dense.build([None, None, self.config.hidden_size]) class TFBertOutput(keras.layers.Layer): def __init__(self, config: BertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFBertLayer(keras.layers.Layer): def __init__(self, config: BertConfig, **kwargs): super().__init__(**kwargs) self.attention = TFBertAttention(config, name='attention') self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = TFBertAttention(config, name='crossattention') self.intermediate = TFBertIntermediate(config, name='intermediate') self.bert_output = TFBertOutput(config, name='output') def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_value: Tuple[tf.Tensor] | None, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value 
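# For decoder layers the per-layer cache is a 4-tuple (self_key, self_value, cross_key, cross_value):
# the self-attention cache is read from past_key_value[:2], the cross-attention cache from
# past_key_value[-2:], and the freshly computed tensors are concatenated into present_key_value,
# which is appended to the layer outputs when is_decoder is True.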
intermediate_output = self.intermediate(hidden_states=attention_output) layer_output = self.bert_output(hidden_states=intermediate_output, input_tensor=attention_output, training=training) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'attention', None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, 'intermediate', None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, 'bert_output', None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) if getattr(self, 'crossattention', None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None) class TFBertEncoder(keras.layers.Layer): def __init__(self, config: BertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFBertLayer(config, name=f'layer_._{i}') for i in range(config.num_hidden_layers)] def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_values: Tuple[Tuple[tf.Tensor]] | None, use_cache: Optional[bool], output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None)) return TFBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layer', None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) class TFBertPooler(keras.layers.Layer): def __init__(self, config: BertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation='tanh', name='dense') self.config = config def call(self, hidden_states: tf.Tensor) -> 
tf.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFBertPredictionHeadTransform(keras.layers.Layer): def __init__(self, config: BertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFBertLMPredictionHead(keras.layers.Layer): def __init__(self, config: BertConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.transform = TFBertPredictionHeadTransform(config, name='transform') self.input_embeddings = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer='zeros', trainable=True, name='bias') if self.built: return self.built = True if getattr(self, 'transform', None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) def get_output_embeddings(self) -> keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {'bias': self.bias} def set_bias(self, value: tf.Variable): self.bias = value['bias'] self.config.vocab_size = shape_list(value['bias'])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states class TFBertMLMHead(keras.layers.Layer): def __init__(self, config: BertConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFBertLMPredictionHead(config, input_embeddings, name='predictions') def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'predictions', None) 
is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) class TFBertNSPHead(keras.layers.Layer): def __init__(self, config: BertConfig, **kwargs): super().__init__(**kwargs) self.seq_relationship = keras.layers.Dense(units=2, kernel_initializer=get_initializer(config.initializer_range), name='seq_relationship') self.config = config def call(self, pooled_output: tf.Tensor) -> tf.Tensor: seq_relationship_score = self.seq_relationship(inputs=pooled_output) return seq_relationship_score def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'seq_relationship', None) is not None: with tf.name_scope(self.seq_relationship.name): self.seq_relationship.build([None, None, self.config.hidden_size]) @keras_serializable class TFBertMainLayer(keras.layers.Layer): config_class = BertConfig def __init__(self, config: BertConfig, add_pooling_layer: bool=True, **kwargs): super().__init__(**kwargs) self.config = config self.is_decoder = config.is_decoder self.embeddings = TFBertEmbeddings(config, name='embeddings') self.encoder = TFBertEncoder(config, name='encoder') self.pooler = TFBertPooler(config, name='pooler') if add_pooling_layer else None def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: if not self.config.is_decoder: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape if past_key_values is None: past_key_values_length = 0 past_key_values = [None] * len(self.encoder.layer) else: past_key_values_length = shape_list(past_key_values[0][0])[-2] if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, training=training) attention_mask_shape = shape_list(attention_mask) mask_seq_length = seq_length + past_key_values_length if self.is_decoder: seq_ids = tf.range(mask_seq_length) causal_mask = tf.less_equal(tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), 
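# Decoder attention mask: seq_ids indexes the key positions; tiling them and comparing against the
# query index builds a (batch, query, key) lower-triangular causal mask (a query may only attend to
# keys at the same or earlier positions). It is multiplied with the padding mask, broadcast to
# (batch, 1, query, key) and, a few lines further down, converted to the additive form
# (1 - mask) * -10000 that TFBertSelfAttention adds to the attention scores.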
seq_ids[None, :, None]) causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) extended_attention_mask = causal_mask * attention_mask[:, None, :] attention_mask_shape = shape_list(extended_attention_mask) extended_attention_mask = tf.reshape(extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])) if past_key_values[0] is not None: extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] else: extended_attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])) extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) if self.is_decoder and encoder_attention_mask is not None: encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder(hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'pooler', None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFBertPreTrainedModel(TFPreTrainedModel): config_class = BertConfig base_model_prefix = 'bert' @dataclass class TFBertForPreTrainingOutput(ModelOutput): loss: tf.Tensor | None = None prediction_logits: tf.Tensor = None seq_relationship_logits: tf.Tensor = None hidden_states: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None attentions: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None BERT_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Args:\n config ([`BertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' BERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
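# --- Editor's illustrative sketch, not part of the original source. ---
# BERT_START_DOCSTRING above describes three equivalent ways of passing inputs to the TF
# models; a minimal usage example, assuming the public "bert-base-uncased" checkpoint:
from transformers import BertTokenizer, TFBertModel

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = TFBertModel.from_pretrained("bert-base-uncased")
enc = tokenizer("Hello world", return_tensors="tf")

out_tensor = model(enc["input_ids"])                                 # single tensor
out_list = model([enc["input_ids"], enc["attention_mask"]])          # list, in docstring order
out_dict = model({"input_ids": enc["input_ids"],
                  "attention_mask": enc["attention_mask"]})          # dict keyed by input names
# All three calls return the same output type (TFBaseModelOutputWithPoolingAndCrossAttentions).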
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False``):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @add_start_docstrings('The bare Bert Model transformer outputting raw hidden-states without any specific head on top.', BERT_START_DOCSTRING) class TFBertModel(TFBertPreTrainedModel): def __init__(self, config: BertConfig, add_pooling_layer: bool=True, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.bert = TFBertMainLayer(config, add_pooling_layer, name='bert') @unpack_inputs @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'bert', None) is not None: with tf.name_scope(self.bert.name): self.bert.build(None) @add_start_docstrings('\nBert Model with two heads on top as done during the pretraining:\n a `masked language modeling` head and a `next sentence prediction (classification)` head.\n ', BERT_START_DOCSTRING) class TFBertForPreTraining(TFBertPreTrainedModel, TFBertPreTrainingLoss): _keys_to_ignore_on_load_unexpected = ['position_ids', 'cls.predictions.decoder.weight', 'cls.predictions.decoder.bias'] def __init__(self, config: BertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.bert = TFBertMainLayer(config, name='bert') self.nsp = TFBertNSPHead(config, name='nsp___cls') self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name='mlm___cls') def get_lm_head(self) -> keras.layers.Layer: return self.mlm.predictions def get_prefix_bias_name(self) -> str: warnings.warn('The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.', FutureWarning) return self.name + '/' + self.mlm.name + '/' + self.mlm.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TFBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, next_sentence_label: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFBertForPreTrainingOutput, Tuple[tf.Tensor]]: outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) (sequence_output, pooled_output) = outputs[:2] prediction_scores = self.mlm(sequence_output=sequence_output, training=training) seq_relationship_score = self.nsp(pooled_output=pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: d_labels = {'labels': labels} d_labels['next_sentence_label'] = next_sentence_label total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score)) if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return TFBertForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'bert', None) is not None: with tf.name_scope(self.bert.name): self.bert.build(None) if getattr(self, 'nsp', None) is not None: with tf.name_scope(self.nsp.name): self.nsp.build(None) if getattr(self, 'mlm', None) is not None: with 
tf.name_scope(self.mlm.name): self.mlm.build(None) @add_start_docstrings('Bert Model with a `language modeling` head on top.', BERT_START_DOCSTRING) class TFBertForMaskedLM(TFBertPreTrainedModel, TFMaskedLanguageModelingLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'cls.seq_relationship', 'cls.predictions.decoder.weight', 'nsp___cls'] def __init__(self, config: BertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if config.is_decoder: logger.warning('If you want to use `TFBertForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.') self.bert = TFBertMainLayer(config, add_pooling_layer=False, name='bert') self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name='mlm___cls') def get_lm_head(self) -> keras.layers.Layer: return self.mlm.predictions def get_prefix_bias_name(self) -> str: warnings.warn('The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.', FutureWarning) return self.name + '/' + self.mlm.name + '/' + self.mlm.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, expected_output="'paris'", expected_loss=0.88) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] prediction_scores = self.mlm(sequence_output=sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return (loss,) + output if loss is not None else output return TFMaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'bert', None) is not None: with tf.name_scope(self.bert.name): self.bert.build(None) if getattr(self, 'mlm', None) is not None: with tf.name_scope(self.mlm.name): self.mlm.build(None) class TFBertLMHeadModel(TFBertPreTrainedModel, TFCausalLanguageModelingLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'cls.seq_relationship', 'cls.predictions.decoder.weight', 'nsp___cls'] def __init__(self, config: BertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if not config.is_decoder: logger.warning('If you want to use `TFBertLMHeadModel` as a standalone, add `is_decoder=True.`') self.bert = TFBertMainLayer(config, add_pooling_layer=False, name='bert') self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name='mlm___cls') def get_lm_head(self) -> keras.layers.Layer: 
return self.mlm.predictions def get_prefix_bias_name(self) -> str: warnings.warn('The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.', FutureWarning) return self.name + '/' + self.mlm.name + '/' + self.mlm.predictions.name def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = tf.ones(input_shape) if past_key_values is not None: input_ids = input_ids[:, -1:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values} @unpack_inputs @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False, **kwargs) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] logits = self.mlm(sequence_output=sequence_output, training=training) loss = None if labels is not None: shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels=labels, logits=shifted_logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFCausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'bert', None) is not None: with tf.name_scope(self.bert.name): self.bert.build(None) if getattr(self, 'mlm', None) is not None: with tf.name_scope(self.mlm.name): self.mlm.build(None) @add_start_docstrings('Bert Model with a `next sentence prediction (classification)` head on top.', BERT_START_DOCSTRING) class TFBertForNextSentencePrediction(TFBertPreTrainedModel, TFNextSentencePredictionLoss): _keys_to_ignore_on_load_unexpected = ['mlm___cls', 'cls.predictions'] def __init__(self, config: BertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.bert = TFBertMainLayer(config, name='bert') self.nsp = TFBertNSPHead(config, name='nsp___cls') @unpack_inputs @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) 
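# --- Editor's illustrative sketch, not part of the original source. ---
# It shows the logit/label shift performed by TFBertLMHeadModel.call above: logits at
# position i are scored against the label at position i + 1, i.e. next-token prediction.
# Toy tensors only; 30522 is the bert-base-uncased vocabulary size.
import tensorflow as tf

labels = tf.constant([[11, 12, 13, 14]])                 # (batch, seq)
logits = tf.random.uniform((1, 4, 30522))                # (batch, seq, vocab)

shifted_logits = logits[:, :-1]                          # predictions for positions 0..2
shifted_labels = labels[:, 1:]                           # targets: 12, 13, 14
per_token_loss = tf.keras.losses.sparse_categorical_crossentropy(
    shifted_labels, shifted_logits, from_logits=True
)                                                        # shape (1, 3); similar in spirit to
                                                         # hf_compute_loss, which also ignores -100 labels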
@replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, next_sentence_label: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFNextSentencePredictorOutput, Tuple[tf.Tensor]]: outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) pooled_output = outputs[1] seq_relationship_scores = self.nsp(pooled_output=pooled_output) next_sentence_loss = None if next_sentence_label is None else self.hf_compute_loss(labels=next_sentence_label, logits=seq_relationship_scores) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return (next_sentence_loss,) + output if next_sentence_loss is not None else output return TFNextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'bert', None) is not None: with tf.name_scope(self.bert.name): self.bert.build(None) if getattr(self, 'nsp', None) is not None: with tf.name_scope(self.nsp.name): self.nsp.build(None) @add_start_docstrings('\n Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. 
for GLUE tasks.\n ', BERT_START_DOCSTRING) class TFBertForSequenceClassification(TFBertPreTrainedModel, TFSequenceClassificationLoss): _keys_to_ignore_on_load_unexpected = ['mlm___cls', 'nsp___cls', 'cls.predictions', 'cls.seq_relationship'] _keys_to_ignore_on_load_missing = ['dropout'] def __init__(self, config: BertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.bert = TFBertMainLayer(config, name='bert') classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = keras.layers.Dropout(rate=classifier_dropout) self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'bert', None) is not None: with tf.name_scope(self.bert.name): self.bert.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', BERT_START_DOCSTRING) class TFBertForMultipleChoice(TFBertPreTrainedModel, TFMultipleChoiceLoss): _keys_to_ignore_on_load_unexpected = ['mlm___cls', 'nsp___cls', 'cls.predictions', 'cls.seq_relationship'] _keys_to_ignore_on_load_missing = ['dropout'] def __init__(self, config: BertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.bert = TFBertMainLayer(config, name='bert') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.classifier = keras.layers.Dense(units=1, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(tensor=input_ids, shape=(-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None flat_inputs_embeds = tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None outputs = self.bert(input_ids=flat_input_ids, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) pooled_output = outputs[1] pooled_output = self.dropout(inputs=pooled_output, training=training) logits = self.classifier(inputs=pooled_output) reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'bert', None) is not None: with tf.name_scope(self.bert.name): self.bert.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n Bert Model with a token 
classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', BERT_START_DOCSTRING) class TFBertForTokenClassification(TFBertPreTrainedModel, TFTokenClassificationLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'mlm___cls', 'nsp___cls', 'cls.predictions', 'cls.seq_relationship'] _keys_to_ignore_on_load_missing = ['dropout'] def __init__(self, config: BertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.bert = TFBertMainLayer(config, add_pooling_layer=False, name='bert') classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = keras.layers.Dropout(rate=classifier_dropout) self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, expected_loss=_TOKEN_CLASS_EXPECTED_LOSS) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] sequence_output = self.dropout(inputs=sequence_output, training=training) logits = self.classifier(inputs=sequence_output) loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'bert', None) is not None: with tf.name_scope(self.bert.name): self.bert.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', BERT_START_DOCSTRING) class TFBertForQuestionAnswering(TFBertPreTrainedModel, TFQuestionAnsweringLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'mlm___cls', 'nsp___cls', 'cls.predictions', 'cls.seq_relationship'] def __init__(self, config: BertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = 
config.num_labels self.bert = TFBertMainLayer(config, add_pooling_layer=False, name='bert') self.qa_outputs = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_QA, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=_QA_TARGET_START_INDEX, qa_target_end_index=_QA_TARGET_END_INDEX, expected_output=_QA_EXPECTED_OUTPUT, expected_loss=_QA_EXPECTED_LOSS) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, start_positions: np.ndarray | tf.Tensor | None=None, end_positions: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] logits = self.qa_outputs(inputs=sequence_output) (start_logits, end_logits) = tf.split(value=logits, num_or_size_splits=2, axis=-1) start_logits = tf.squeeze(input=start_logits, axis=-1) end_logits = tf.squeeze(input=end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {'start_position': start_positions} labels['end_position'] = end_positions loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (loss,) + output if loss is not None else output return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'bert', None) is not None: with tf.name_scope(self.bert.name): self.bert.build(None) if getattr(self, 'qa_outputs', None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) # File: transformers-main/src/transformers/models/bert/tokenization_bert.py """""" import collections import os import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} def load_vocab(vocab_file): vocab = collections.OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as reader: tokens = reader.readlines() for (index, token) in enumerate(tokens): token = token.rstrip('\n') vocab[token] = index return vocab def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES def 
__init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text, split_special_tokens=False): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file 
= (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: for (token, token_index) in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,) class BasicTokenizer: def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) unicode_normalized_text = unicodedata.normalize('NFC', text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return ''.join(output) def _is_chinese_char(self, cp): if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103): return True return False def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 65533 or _is_control(char): continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output) class WordpieceTokenizer: def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start 
< end: substr = ''.join(chars[start:end]) if start > 0: substr = '##' + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens # File: transformers-main/src/transformers/models/bert/tokenization_bert_fast.py """""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} class BertTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = BertTokenizer def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars: normalizer_class = getattr(normalizers, normalizer_state.pop('type')) normalizer_state['lowercase'] = do_lower_case normalizer_state['strip_accents'] = strip_accents normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) # File: transformers-main/src/transformers/models/bert/tokenization_bert_tf.py import os from typing import List, Union import tensorflow as tf from tensorflow_text import BertTokenizer as BertTokenizerLayer from tensorflow_text import FastBertTokenizer, ShrinkLongestTrimmer, case_fold_utf8, combine_segments, pad_model_inputs from ...modeling_tf_utils import keras from .tokenization_bert import BertTokenizer class TFBertTokenizer(keras.layers.Layer): def __init__(self, vocab_list: List, do_lower_case: bool, cls_token_id: int=None, sep_token_id: int=None, pad_token_id: int=None, padding: str='longest', truncation: bool=True, max_length: int=512, pad_to_multiple_of: int=None, return_token_type_ids: bool=True, return_attention_mask: bool=True, 
use_fast_bert_tokenizer: bool=True, **tokenizer_kwargs): super().__init__() if use_fast_bert_tokenizer: self.tf_tokenizer = FastBertTokenizer(vocab_list, token_out_type=tf.int64, lower_case_nfd_strip_accents=do_lower_case, **tokenizer_kwargs) else: lookup_table = tf.lookup.StaticVocabularyTable(tf.lookup.KeyValueTensorInitializer(keys=vocab_list, key_dtype=tf.string, values=tf.range(tf.size(vocab_list, out_type=tf.int64), dtype=tf.int64), value_dtype=tf.int64), num_oov_buckets=1) self.tf_tokenizer = BertTokenizerLayer(lookup_table, token_out_type=tf.int64, lower_case=do_lower_case, **tokenizer_kwargs) self.vocab_list = vocab_list self.do_lower_case = do_lower_case self.cls_token_id = vocab_list.index('[CLS]') if cls_token_id is None else cls_token_id self.sep_token_id = vocab_list.index('[SEP]') if sep_token_id is None else sep_token_id self.pad_token_id = vocab_list.index('[PAD]') if pad_token_id is None else pad_token_id self.paired_trimmer = ShrinkLongestTrimmer(max_length - 3, axis=1) self.max_length = max_length self.padding = padding self.truncation = truncation self.pad_to_multiple_of = pad_to_multiple_of self.return_token_type_ids = return_token_type_ids self.return_attention_mask = return_attention_mask @classmethod def from_tokenizer(cls, tokenizer: 'PreTrainedTokenizerBase', **kwargs): do_lower_case = kwargs.pop('do_lower_case', None) do_lower_case = tokenizer.do_lower_case if do_lower_case is None else do_lower_case cls_token_id = kwargs.pop('cls_token_id', None) cls_token_id = tokenizer.cls_token_id if cls_token_id is None else cls_token_id sep_token_id = kwargs.pop('sep_token_id', None) sep_token_id = tokenizer.sep_token_id if sep_token_id is None else sep_token_id pad_token_id = kwargs.pop('pad_token_id', None) pad_token_id = tokenizer.pad_token_id if pad_token_id is None else pad_token_id vocab = tokenizer.get_vocab() vocab = sorted(vocab.items(), key=lambda x: x[1]) vocab_list = [entry[0] for entry in vocab] return cls(vocab_list=vocab_list, do_lower_case=do_lower_case, cls_token_id=cls_token_id, sep_token_id=sep_token_id, pad_token_id=pad_token_id, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs): try: tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs) except: from .tokenization_bert_fast import BertTokenizerFast tokenizer = BertTokenizerFast.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs) return cls.from_tokenizer(tokenizer, **kwargs) def unpaired_tokenize(self, texts): if self.do_lower_case: texts = case_fold_utf8(texts) tokens = self.tf_tokenizer.tokenize(texts) return tokens.merge_dims(1, -1) def call(self, text, text_pair=None, padding=None, truncation=None, max_length=None, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None): if padding is None: padding = self.padding if padding not in ('longest', 'max_length'): raise ValueError("Padding must be either 'longest' or 'max_length'!") if max_length is not None and text_pair is not None: raise ValueError('max_length cannot be overridden at call time when truncating paired texts!') if max_length is None: max_length = self.max_length if truncation is None: truncation = self.truncation if pad_to_multiple_of is None: pad_to_multiple_of = self.pad_to_multiple_of if return_token_type_ids is None: return_token_type_ids = self.return_token_type_ids if return_attention_mask is None: return_attention_mask = self.return_attention_mask if not isinstance(text, 
tf.Tensor): text = tf.convert_to_tensor(text) if text_pair is not None and (not isinstance(text_pair, tf.Tensor)): text_pair = tf.convert_to_tensor(text_pair) if text_pair is not None: if text.shape.rank > 1: raise ValueError('text argument should not be multidimensional when a text pair is supplied!') if text_pair.shape.rank > 1: raise ValueError('text_pair should not be multidimensional!') if text.shape.rank == 2: (text, text_pair) = (text[:, 0], text[:, 1]) text = self.unpaired_tokenize(text) if text_pair is None: if truncation: text = text[:, :max_length - 2] (input_ids, token_type_ids) = combine_segments((text,), start_of_sequence_id=self.cls_token_id, end_of_segment_id=self.sep_token_id) else: text_pair = self.unpaired_tokenize(text_pair) if truncation: (text, text_pair) = self.paired_trimmer.trim([text, text_pair]) (input_ids, token_type_ids) = combine_segments((text, text_pair), start_of_sequence_id=self.cls_token_id, end_of_segment_id=self.sep_token_id) if padding == 'longest': pad_length = input_ids.bounding_shape(axis=1) if pad_to_multiple_of is not None: pad_length = pad_to_multiple_of * -tf.math.floordiv(-pad_length, pad_to_multiple_of) else: pad_length = max_length (input_ids, attention_mask) = pad_model_inputs(input_ids, max_seq_length=pad_length, pad_value=self.pad_token_id) output = {'input_ids': input_ids} if return_attention_mask: output['attention_mask'] = attention_mask if return_token_type_ids: (token_type_ids, _) = pad_model_inputs(token_type_ids, max_seq_length=pad_length, pad_value=self.pad_token_id) output['token_type_ids'] = token_type_ids return output def get_config(self): return {'vocab_list': self.vocab_list, 'do_lower_case': self.do_lower_case, 'cls_token_id': self.cls_token_id, 'sep_token_id': self.sep_token_id, 'pad_token_id': self.pad_token_id} # File: transformers-main/src/transformers/models/bert_generation/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available _import_structure = {'configuration_bert_generation': ['BertGenerationConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_bert_generation'] = ['BertGenerationTokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_bert_generation'] = ['BertGenerationDecoder', 'BertGenerationEncoder', 'BertGenerationPreTrainedModel', 'load_tf_weights_in_bert_generation'] if TYPE_CHECKING: from .configuration_bert_generation import BertGenerationConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_generation import BertGenerationTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder, BertGenerationPreTrainedModel, load_tf_weights_in_bert_generation else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/bert_generation/configuration_bert_generation.py """""" from ...configuration_utils import PretrainedConfig class BertGenerationConfig(PretrainedConfig): model_type = 'bert-generation' def __init__(self, 
vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type='absolute', use_cache=True, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache # File: transformers-main/src/transformers/models/bert_generation/modeling_bert_generation.py """""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_bert_generation import BertGenerationConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google/bert_for_seq_generation_L-24_bbc_encoder' _CONFIG_FOR_DOC = 'BertGenerationConfig' class BertGenerationSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertGenerationSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': 
self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = 
torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs BERT_GENERATION_SELF_ATTENTION_CLASSES = {'eager': BertGenerationSelfAttention} class BertGenerationAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = BERT_GENERATION_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = BertGenerationSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class BertGenerationIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertGenerationOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertGenerationLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BertGenerationAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} 
should be used as a decoder model if cross attention is added') self.crossattention = BertGenerationAttention(config, position_embedding_type='absolute') self.intermediate = BertGenerationIntermediate(config) self.output = BertGenerationOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BertGenerationLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) def load_tf_weights_in_bert_generation(model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False): try: import numpy as np import tensorflow.compat.v1 as tf import tensorflow_hub as hub import tensorflow_text tf.disable_eager_execution() except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_model = hub.Module(tf_hub_path) init = tf.global_variables_initializer() with tf.Session() as sess: init.run() all_variables = tf_model.variable_map keep_track_variables = all_variables.copy() for key in list(all_variables.keys()): if 'global' in key: logger.info(f'Skipping {key}...') continue if not is_encoder: model_pointer = getattr(model, model_class) else: model_pointer = model is_embedding = False logger.info(f'Trying to match {key}...') sub_layers = key.split('/')[2:] if is_encoder_named_decoder and sub_layers[0] == 'encoder': logger.info(f'Skipping encoder layer {key} for decoder') continue if is_encoder and sub_layers[0] == 'decoder': logger.info(f'Skipping decoder layer {key} for encoder') continue for (i, sub_layer) in enumerate(sub_layers): if sub_layer == 'embeddings': is_embedding = True elif sub_layer == 'LayerNorm': is_embedding = False if 'layer' in sub_layer: model_pointer = model_pointer.layer[int(sub_layer.split('_')[-1])] elif sub_layer in ['kernel', 'gamma']: model_pointer = model_pointer.weight elif sub_layer == 'beta': model_pointer = model_pointer.bias elif sub_layer == 'encdec': model_pointer = model_pointer.crossattention.self elif sub_layer == 'encdec_output': model_pointer = model_pointer.crossattention.output elif is_encoder_named_decoder and sub_layer == 'decoder': model_pointer = model_pointer.encoder else: if sub_layer == 'attention' and 'encdec' in sub_layers[i + 1]: continue try: model_pointer = getattr(model_pointer, sub_layer) except AttributeError: logger.info(f'Skipping to initialize {key} at {sub_layer}...') raise AttributeError array = np.asarray(sess.run(all_variables[key])) if not is_embedding: logger.info(f'Transposing numpy weight of shape {array.shape} for {key}') array = np.transpose(array) else: model_pointer = model_pointer.weight if model_pointer.shape != array.shape: raise ValueError(f'Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched') logger.info(f'Initialize PyTorch weight {key}') model_pointer.data = torch.from_numpy(array.astype(np.float32)) keep_track_variables.pop(key, None) logger.info(f"Weights not copied to PyTorch model: {', '.join(keep_track_variables.keys())}") return model class BertGenerationEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) embeddings = inputs_embeds + position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertGenerationPreTrainedModel(PreTrainedModel): config_class = BertGenerationConfig 
base_model_prefix = 'bert' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) BERT_GENERATION_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`BertGenerationConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BERT_GENERATION_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.', BERT_GENERATION_START_DOCSTRING) class BertGenerationEncoder(BertGenerationPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertGenerationEmbeddings(config) self.encoder = BertEncoder(config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) extended_attention_mask = None if not use_cache: extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = 
self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) class BertGenerationOnlyLMHead(nn.Module): def __init__(self, config): super().__init__() self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, hidden_states): logits = self.decoder(hidden_states) return logits def _tie_weights(self): if self.decoder.bias.device.type == 'meta': self.decoder.bias = self.bias else: self.bias = self.decoder.bias @add_start_docstrings('BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.', BERT_GENERATION_START_DOCSTRING) class BertGenerationDecoder(BertGenerationPreTrainedModel): _tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias'] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning('If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`') self.bert = BertGenerationEncoder(config) self.lm_head = BertGenerationOnlyLMHead(config) self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings self.lm_head.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output 
= outputs[0] prediction_scores = self.lm_head(sequence_output) lm_loss = None if labels is not None: shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/bert_generation/tokenization_bert_generation.py """""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'} class BertGenerationTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES prefix_tokens: List[int] = [] model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', unk_token='<unk>', pad_token='<pad>', sep_token='<::::>', sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None return state def __setstate__(self, d): self.__dict__ = d if not hasattr(self, 'sp_model_kwargs'): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): current_sub_tokens = [] out_string = '' for token in tokens: if token in self.all_special_tokens: 
out_string += self.sp_model.decode(current_sub_tokens) + token current_sub_tokens = [] else: current_sub_tokens.append(token) out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) # File: transformers-main/src/transformers/models/bert_japanese/tokenization_bert_japanese.py """""" import collections import copy import os import unicodedata from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import is_sentencepiece_available, is_sudachi_projection_available, logging if is_sentencepiece_available(): import sentencepiece as spm else: spm = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'spm_file': 'spiece.model'} SPIECE_UNDERLINE = '▁' def load_vocab(vocab_file): vocab = collections.OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as reader: tokens = reader.readlines() for (index, token) in enumerate(tokens): token = token.rstrip('\n') vocab[token] = index return vocab def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens class BertJapaneseTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, spm_file=None, do_lower_case=False, do_word_tokenize=True, do_subword_tokenize=True, word_tokenizer_type='basic', subword_tokenizer_type='wordpiece', never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', mecab_kwargs=None, sudachi_kwargs=None, jumanpp_kwargs=None, **kwargs): if subword_tokenizer_type == 'sentencepiece': if not os.path.isfile(spm_file): raise ValueError(f"Can't find a vocabulary file at path '{spm_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.spm_file = spm_file else: if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()]) self.do_word_tokenize = do_word_tokenize self.word_tokenizer_type = word_tokenizer_type self.lower_case = do_lower_case self.never_split = never_split self.mecab_kwargs = copy.deepcopy(mecab_kwargs) self.sudachi_kwargs = copy.deepcopy(sudachi_kwargs) self.jumanpp_kwargs = copy.deepcopy(jumanpp_kwargs) if do_word_tokenize: if word_tokenizer_type == 'basic': self.word_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=False) elif word_tokenizer_type == 'mecab': self.word_tokenizer = MecabTokenizer(do_lower_case=do_lower_case, never_split=never_split, **mecab_kwargs or {}) elif word_tokenizer_type == 'sudachi': self.word_tokenizer = SudachiTokenizer(do_lower_case=do_lower_case, never_split=never_split, **sudachi_kwargs or {}) elif word_tokenizer_type == 'jumanpp': self.word_tokenizer = JumanppTokenizer(do_lower_case=do_lower_case, never_split=never_split, **jumanpp_kwargs or {}) else: raise ValueError(f"Invalid word_tokenizer_type '{word_tokenizer_type}' is specified.") self.do_subword_tokenize = do_subword_tokenize self.subword_tokenizer_type = subword_tokenizer_type if do_subword_tokenize: if subword_tokenizer_type == 'wordpiece': self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) elif subword_tokenizer_type == 'character': self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=str(unk_token)) elif subword_tokenizer_type == 'sentencepiece': self.subword_tokenizer = SentencepieceTokenizer(vocab=self.spm_file, unk_token=str(unk_token)) else: raise ValueError(f"Invalid subword_tokenizer_type '{subword_tokenizer_type}' is specified.") super().__init__(spm_file=spm_file, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, do_lower_case=do_lower_case, do_word_tokenize=do_word_tokenize, do_subword_tokenize=do_subword_tokenize, word_tokenizer_type=word_tokenizer_type, subword_tokenizer_type=subword_tokenizer_type, never_split=never_split, mecab_kwargs=mecab_kwargs, sudachi_kwargs=sudachi_kwargs, jumanpp_kwargs=jumanpp_kwargs, **kwargs) @property def do_lower_case(self): return self.lower_case def __getstate__(self): state = dict(self.__dict__) if self.word_tokenizer_type in ['mecab', 'sudachi', 'jumanpp']: del state['word_tokenizer'] return state def __setstate__(self, state): self.__dict__ = state if self.word_tokenizer_type == 'mecab': self.word_tokenizer = MecabTokenizer(do_lower_case=self.do_lower_case, never_split=self.never_split, **self.mecab_kwargs or {}) elif self.word_tokenizer_type == 'sudachi': self.word_tokenizer = SudachiTokenizer(do_lower_case=self.do_lower_case, never_split=self.never_split, **self.sudachi_kwargs or {}) elif self.word_tokenizer_type == 'jumanpp': self.word_tokenizer = JumanppTokenizer(do_lower_case=self.do_lower_case, never_split=self.never_split, **self.jumanpp_kwargs or {}) def _tokenize(self, text): if self.do_word_tokenize: tokens = self.word_tokenizer.tokenize(text, never_split=self.all_special_tokens) else: tokens = [text] if self.do_subword_tokenize: split_tokens = [sub_token for token in tokens for sub_token in self.subword_tokenizer.tokenize(token)] else: split_tokens = tokens return split_tokens @property def vocab_size(self): if 
self.subword_tokenizer_type == 'sentencepiece': return len(self.subword_tokenizer.sp_model) return len(self.vocab) def get_vocab(self): if self.subword_tokenizer_type == 'sentencepiece': vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab return dict(self.vocab, **self.added_tokens_encoder) def _convert_token_to_id(self, token): if self.subword_tokenizer_type == 'sentencepiece': return self.subword_tokenizer.sp_model.PieceToId(token) return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): if self.subword_tokenizer_type == 'sentencepiece': return self.subword_tokenizer.sp_model.IdToPiece(index) return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): if self.subword_tokenizer_type == 'sentencepiece': return self.subword_tokenizer.sp_model.decode(tokens) out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if os.path.isdir(save_directory): if self.subword_tokenizer_type == 'sentencepiece': vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['spm_file']) else: vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory if self.subword_tokenizer_type == 'sentencepiece': with open(vocab_file, 'wb') as writer: content_spiece_model = self.subword_tokenizer.sp_model.serialized_model_proto() writer.write(content_spiece_model) else: with open(vocab_file, 'w', encoding='utf-8') as writer: index = 0 for (token, token_index) in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. 
Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,) class MecabTokenizer: def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, mecab_dic: Optional[str]='ipadic', mecab_option: Optional[str]=None): self.do_lower_case = do_lower_case self.never_split = never_split if never_split is not None else [] self.normalize_text = normalize_text try: import fugashi except ModuleNotFoundError as error: raise error.__class__('You need to install fugashi to use MecabTokenizer. See https://pypi.org/project/fugashi/ for installation.') mecab_option = mecab_option or '' if mecab_dic is not None: if mecab_dic == 'ipadic': try: import ipadic except ModuleNotFoundError as error: raise error.__class__('The ipadic dictionary is not installed. See https://github.com/polm/ipadic-py for installation.') dic_dir = ipadic.DICDIR elif mecab_dic == 'unidic_lite': try: import unidic_lite except ModuleNotFoundError as error: raise error.__class__('The unidic_lite dictionary is not installed. See https://github.com/polm/unidic-lite for installation.') dic_dir = unidic_lite.DICDIR elif mecab_dic == 'unidic': try: import unidic except ModuleNotFoundError as error: raise error.__class__('The unidic dictionary is not installed. See https://github.com/polm/unidic-py for installation.') dic_dir = unidic.DICDIR if not os.path.isdir(dic_dir): raise RuntimeError('The unidic dictionary itself is not found. See https://github.com/polm/unidic-py for installation.') else: raise ValueError('Invalid mecab_dic is specified.') mecabrc = os.path.join(dic_dir, 'mecabrc') mecab_option = f'-d "{dic_dir}" -r "{mecabrc}" ' + mecab_option self.mecab = fugashi.GenericTagger(mecab_option) def tokenize(self, text, never_split=None, **kwargs): if self.normalize_text: text = unicodedata.normalize('NFKC', text) never_split = self.never_split + (never_split if never_split is not None else []) tokens = [] for word in self.mecab(text): token = word.surface if self.do_lower_case and token not in never_split: token = token.lower() tokens.append(token) return tokens class SudachiTokenizer: def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False, sudachi_split_mode='A', sudachi_config_path=None, sudachi_resource_dir=None, sudachi_dict_type='core', sudachi_projection=None): self.do_lower_case = do_lower_case self.never_split = never_split if never_split is not None else [] self.normalize_text = normalize_text self.trim_whitespace = trim_whitespace try: from sudachipy import dictionary, tokenizer except ImportError: raise ImportError('You need to install sudachipy to use SudachiTokenizer. 
See https://github.com/WorksApplications/SudachiPy for installation.') if sudachi_split_mode == 'A': self.split_mode = tokenizer.Tokenizer.SplitMode.A elif sudachi_split_mode == 'B': self.split_mode = tokenizer.Tokenizer.SplitMode.B elif sudachi_split_mode == 'C': self.split_mode = tokenizer.Tokenizer.SplitMode.C else: raise ValueError('Invalid sudachi_split_mode is specified.') self.projection = sudachi_projection sudachi_dictionary = dictionary.Dictionary(config_path=sudachi_config_path, resource_dir=sudachi_resource_dir, dict=sudachi_dict_type) if is_sudachi_projection_available(): self.sudachi = sudachi_dictionary.create(self.split_mode, projection=self.projection) elif self.projection is not None: raise ImportError('You need to install sudachipy>=0.6.8 to specify `projection` field in sudachi_kwargs.') else: self.sudachi = sudachi_dictionary.create(self.split_mode) def tokenize(self, text, never_split=None, **kwargs): if self.normalize_text: text = unicodedata.normalize('NFKC', text) never_split = self.never_split + (never_split if never_split is not None else []) tokens = [] for word in self.sudachi.tokenize(text): token = word.surface() if self.do_lower_case and token not in never_split: token = token.lower() if self.trim_whitespace: if token.strip() == '': continue else: token = token.strip() tokens.append(token) return tokens class JumanppTokenizer: def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, trim_whitespace=False): self.do_lower_case = do_lower_case self.never_split = never_split if never_split is not None else [] self.normalize_text = normalize_text self.trim_whitespace = trim_whitespace try: import rhoknp except ImportError: raise ImportError('You need to install rhoknp to use JumanppTokenizer. See https://github.com/ku-nlp/rhoknp for installation.') self.juman = rhoknp.Jumanpp() def tokenize(self, text, never_split=None, **kwargs): if self.normalize_text: text = unicodedata.normalize('NFKC', text) text = text.strip() never_split = self.never_split + (never_split if never_split is not None else []) tokens = [] for mrph in self.juman.apply_to_sentence(text).morphemes: token = mrph.text if self.do_lower_case and token not in never_split: token = token.lower() if self.trim_whitespace: if token.strip() == '': continue else: token = token.strip() tokens.append(token) return tokens class CharacterTokenizer: def __init__(self, vocab, unk_token, normalize_text=True): self.vocab = vocab self.unk_token = unk_token self.normalize_text = normalize_text def tokenize(self, text): if self.normalize_text: text = unicodedata.normalize('NFKC', text) output_tokens = [] for char in text: if char not in self.vocab: output_tokens.append(self.unk_token) continue output_tokens.append(char) return output_tokens class BasicTokenizer: def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) unicode_normalized_text = unicodedata.normalize('NFC', text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] 
for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return ''.join(output) def _is_chinese_char(self, cp): if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103): return True return False def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 65533 or _is_control(char): continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output) class WordpieceTokenizer: def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = ''.join(chars[start:end]) if start > 0: substr = '##' + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens class SentencepieceTokenizer: def __init__(self, vocab, unk_token, do_lower_case=False, remove_space=True, keep_accents=True, sp_model_kwargs: Optional[Dict[str, Any]]=None): self.vocab = vocab self.unk_token = unk_token self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab) def preprocess_text(self, inputs): if self.remove_space: outputs = ' '.join(inputs.strip().split()) else: outputs = inputs outputs = outputs.replace('``', '"').replace("''", '"') if not self.keep_accents: outputs = unicodedata.normalize('NFKD', outputs) outputs = ''.join([c for c in outputs if not unicodedata.combining(c)]) if self.do_lower_case: outputs = outputs.lower() return outputs def 
tokenize(self, text): text = self.preprocess_text(text) pieces = self.sp_model.encode(text, out_type=str) new_pieces = [] for piece in pieces: if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit(): cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, '')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: cur_pieces = cur_pieces[1:] else: cur_pieces[0] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(cur_pieces) else: new_pieces.append(piece) return new_pieces # File: transformers-main/src/transformers/models/bertweet/tokenization_bertweet.py """""" import html import os import re from shutil import copyfile from typing import List, Optional, Tuple import regex from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'merges_file': 'bpe.codes'} def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char pairs = set(pairs) return pairs class BertweetTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, merges_file, normalization=False, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs): try: from emoji import demojize self.demojizer = demojize except ImportError: logger.warning('emoji is not installed, thus not converting emoticons or emojis into text. Install emoji: pip3 install emoji==0.6.0') self.demojizer = None self.vocab_file = vocab_file self.merges_file = merges_file self.encoder = {} self.encoder[str(bos_token)] = 0 self.encoder[str(pad_token)] = 1 self.encoder[str(eos_token)] = 2 self.encoder[str(unk_token)] = 3 self.add_from_file(vocab_file) self.decoder = {v: k for (k, v) in self.encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: merges = merges_handle.read().split('\n')[:-1] merges = [tuple(merge.split()[:-1]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} self.normalization = normalization self.tweetPreprocessor = TweetTokenizer() self.special_puncts = {'’': "'", '…': '...'} super().__init__(normalization=normalization, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, **kwargs) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] 
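# --- Usage sketch (illustrative, not part of the original file) ---
# The three methods above give BertweetTokenizer its RoBERTa-style pair layout,
# <s> A </s> </s> B </s>, with all-zero token type ids. A minimal, standalone
# sketch of that layout using the ids assigned in __init__ (bos/cls -> 0,
# eos/sep -> 2); the content ids 10, 11 and 20 are hypothetical.
cls_id, sep_id = [0], [2]
ids_a, ids_b = [10, 11], [20]
pair_input_ids = cls_id + ids_a + sep_id + sep_id + ids_b + sep_id
pair_token_type_ids = [0] * len(pair_input_ids)
# pair_input_ids == [0, 10, 11, 2, 2, 20, 2]; pair_token_type_ids is all zeros.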
@property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) word = tuple(list(word[:-1]) + [word[-1] + '</w>']) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = '@@ '.join(word) word = word[:-4] self.cache[token] = word return word def _tokenize(self, text): if self.normalization: text = self.normalizeTweet(text) split_tokens = [] words = re.findall('\\S+\\n?', text) for token in words: split_tokens.extend(list(self.bpe(token).split(' '))) return split_tokens def normalizeTweet(self, tweet): for punct in self.special_puncts: tweet = tweet.replace(punct, self.special_puncts[punct]) tokens = self.tweetPreprocessor.tokenize(tweet) normTweet = ' '.join([self.normalizeToken(token) for token in tokens]) normTweet = normTweet.replace('cannot ', 'can not ').replace("n't ", " n't ").replace("n 't ", " n't ").replace("ca n't", "can't").replace("ai n't", "ain't") normTweet = normTweet.replace("'m ", " 'm ").replace("'re ", " 're ").replace("'s ", " 's ").replace("'ll ", " 'll ").replace("'d ", " 'd ").replace("'ve ", " 've ") normTweet = normTweet.replace(' p . m .', ' p.m.').replace(' p . m ', ' p.m ').replace(' a . m .', ' a.m.').replace(' a . 
m ', ' a.m ') return ' '.join(normTweet.split()) def normalizeToken(self, token): lowercased_token = token.lower() if token.startswith('@'): return '@USER' elif lowercased_token.startswith('http') or lowercased_token.startswith('www'): return 'HTTPURL' elif len(token) == 1: if token in self.special_puncts: return self.special_puncts[token] if self.demojizer is not None: return self.demojizer(token) else: return token else: return token def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): out_string = ' '.join(tokens).replace('@@ ', '').strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) out_merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file): copyfile(self.merges_file, out_merge_file) return (out_vocab_file, out_merge_file) def add_from_file(self, f): if isinstance(f, str): try: with open(f, 'r', encoding='utf-8') as fd: self.add_from_file(fd) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset') return lines = f.readlines() for lineTmp in lines: line = lineTmp.strip() idx = line.rfind(' ') if idx == -1: raise ValueError("Incorrect dictionary format, expected ' '") word = line[:idx] self.encoder[word] = len(self.encoder) '' EMOTICONS = "\n (?:\n [<>]?\n [:;=8] # eyes\n [\\-o\\*\\']? # optional nose\n [\\)\\]\\(\\[dDpP/\\:\\}\\{@\\|\\\\] # mouth\n |\n [\\)\\]\\(\\[dDpP/\\:\\}\\{@\\|\\\\] # mouth\n [\\-o\\*\\']? # optional nose\n [:;=8] # eyes\n [<>]?\n |\n <3 # heart\n )" URLS = '\t\t\t# Capture 1: entire matched URL\n (?:\n https?:\t\t\t\t# URL protocol and colon\n (?:\n /{1,3}\t\t\t\t# 1-3 slashes\n |\t\t\t\t\t# or\n [a-z0-9%]\t\t\t\t# Single letter or digit or \'%\'\n # (Trying not to match e.g. 
"URI::Escape")\n )\n |\t\t\t\t\t# or\n # looks like domain name followed by a slash:\n [a-z0-9.\\-]+[.]\n (?:[a-z]{2,13})\n /\n )\n (?:\t\t\t\t\t# One or more:\n [^\\s()<>{}\\[\\]]+\t\t\t# Run of non-space, non-()<>{}[]\n |\t\t\t\t\t# or\n \\([^\\s()]*?\\([^\\s()]+\\)[^\\s()]*?\\) # balanced parens, one level deep: (...(...)...)\n |\n \\([^\\s]+?\\)\t\t\t\t# balanced parens, non-recursive: (...)\n )+\n (?:\t\t\t\t\t# End with:\n \\([^\\s()]*?\\([^\\s()]+\\)[^\\s()]*?\\) # balanced parens, one level deep: (...(...)...)\n |\n \\([^\\s]+?\\)\t\t\t\t# balanced parens, non-recursive: (...)\n |\t\t\t\t\t# or\n [^\\s`!()\\[\\]{};:\'".,<>?«»“”‘’]\t# not a space or one of these punct chars\n )\n |\t\t\t\t\t# OR, the following to match naked domains:\n (?:\n (?\\s]+>', '[\\-]+>|<[\\-]+', '(?:@[\\w_]+)', "(?:\\#+[\\w_]+[\\w\\'_\\-]*[\\w_]+)", '[\\w.+-]+@[\\w-]+\\.(?:[\\w-]\\.?)+[\\w-]', "\n (?:[^\\W\\d_](?:[^\\W\\d_]|['\\-_])+[^\\W\\d_]) # Words with apostrophes or dashes.\n |\n (?:[+\\-]?\\d+[,/.:-]\\d+[+\\-]?) # Numbers, including fractions, decimals.\n |\n (?:[\\w_]+) # Words without apostrophes or dashes.\n |\n (?:\\.(?:\\s*\\.){1,}) # Ellipsis dots.\n |\n (?:\\S) # Everything else that isn't whitespace.\n ") WORD_RE = regex.compile('(%s)' % '|'.join(REGEXPS), regex.VERBOSE | regex.I | regex.UNICODE) HANG_RE = regex.compile('([^a-zA-Z0-9])\\1{3,}') EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE) ENT_RE = regex.compile('&(#?(x?))([^&;\\s]+);') def _str_to_unicode(text, encoding=None, errors='strict'): if encoding is None: encoding = 'utf-8' if isinstance(text, bytes): return text.decode(encoding, errors) return text def _replace_html_entities(text, keep=(), remove_illegal=True, encoding='utf-8'): def _convert_entity(match): entity_body = match.group(3) if match.group(1): try: if match.group(2): number = int(entity_body, 16) else: number = int(entity_body, 10) if 128 <= number <= 159: return bytes((number,)).decode('cp1252') except ValueError: number = None elif entity_body in keep: return match.group(0) else: number = html.entities.name2codepoint.get(entity_body) if number is not None: try: return chr(number) except (ValueError, OverflowError): pass return '' if remove_illegal else match.group(0) return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding)) class TweetTokenizer: def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False): self.preserve_case = preserve_case self.reduce_len = reduce_len self.strip_handles = strip_handles def tokenize(self, text): text = _replace_html_entities(text) if self.strip_handles: text = remove_handles(text) if self.reduce_len: text = reduce_lengthening(text) safe_text = HANG_RE.sub('\\1\\1\\1', text) words = WORD_RE.findall(safe_text) if not self.preserve_case: words = [x if EMOTICON_RE.search(x) else x.lower() for x in words] return words def reduce_lengthening(text): pattern = regex.compile('(.)\\1{2,}') return pattern.sub('\\1\\1\\1', text) def remove_handles(text): pattern = regex.compile('(? 
Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)]) # File: transformers-main/src/transformers/models/big_bird/convert_bigbird_original_tf_checkpoint_to_pytorch.py """""" import argparse from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa): config = BigBirdConfig.from_json_file(big_bird_config_file) print(f'Building PyTorch model from configuration: {config}') if is_trivia_qa: model = BigBirdForQuestionAnswering(config) else: model = BigBirdForPreTraining(config) load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa) print(f'Save PyTorch model to {pytorch_dump_path}') model.save_pretrained(pytorch_dump_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.') parser.add_argument('--big_bird_config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained BERT model. \nThis specifies the model architecture.') parser.add_argument('--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') parser.add_argument('--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.') args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa) # File: transformers-main/src/transformers/models/big_bird/modeling_big_bird.py """""" import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_big_bird import BigBirdConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google/bigbird-roberta-base' _CONFIG_FOR_DOC = 'BigBirdConfig' _TRIVIA_QA_MAPPING = {'big_bird_attention': 'attention/self', 'output_layer_norm': 'output/LayerNorm', 'attention_output': 'attention/output/dense', 'output': 'output/dense', 'self_attention_layer_norm': 'attention/output/LayerNorm', 'intermediate': 'intermediate/dense', 'word_embeddings': 'bert/embeddings/word_embeddings', 'position_embedding': 'bert/embeddings/position_embeddings', 'type_embeddings': 'bert/embeddings/token_type_embeddings', 'embeddings': 'bert/embeddings', 'layer_normalization': 'output/LayerNorm', 'layer_norm': 'LayerNorm', 'trivia_qa_head': 'qa_classifier', 'dense': 'intermediate/dense', 
'dense_1': 'qa_outputs'} def load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False): def load_tf_weights_bert(init_vars, tf_path): names = [] tf_weights = {} for (name, shape) in init_vars: array = tf.train.load_variable(tf_path, name) name = name.replace('bert/encoder/LayerNorm', 'bert/embeddings/LayerNorm') logger.info(f'Loading TF weight {name} with shape {shape}') names.append(name) tf_weights[name] = array return (names, tf_weights) def load_tf_weights_trivia_qa(init_vars): names = [] tf_weights = {} for (i, var) in enumerate(init_vars): name_items = var.name.split('/') if 'transformer_scaffold' in name_items[0]: layer_name_items = name_items[0].split('_') if len(layer_name_items) < 3: layer_name_items += [0] name_items[0] = f'bert/encoder/layer_{layer_name_items[2]}' name = '/'.join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[:-2] if 'self/attention/output' in name: name = name.replace('self/attention/output', 'output') if i >= len(init_vars) - 2: name = name.replace('intermediate', 'output') logger.info(f'Loading TF weight {name} with shape {var.shape}') array = var.value().numpy() names.append(name) tf_weights[name] = array return (names, tf_weights) try: import re import numpy as np import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path) if len(init_vars) <= 0: raise ValueError('Loaded trained variables cannot be empty.') pt_names = list(model.state_dict().keys()) if is_trivia_qa: (names, tf_weights) = load_tf_weights_trivia_qa(init_vars) else: (names, tf_weights) = load_tf_weights_bert(init_vars, tf_path) for txt_name in names: array = tf_weights[txt_name] name = txt_name.split('/') if any((n in ['adam_v', 'adam_m', 'AdamWeightDecayOptimizer', 'AdamWeightDecayOptimizer_1', 'global_step'] for n in name)): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model pt_name = [] for m_name in name: if re.fullmatch('[A-Za-z]+_\\d+', m_name): scope_names = re.split('_(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'kernel' or scope_names[0] == 'gamma': pointer = getattr(pointer, 'weight') pt_name.append('weight') elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': pointer = getattr(pointer, 'bias') pt_name.append('bias') elif scope_names[0] == 'output_weights': pointer = getattr(pointer, 'weight') pt_name.append('weight') elif scope_names[0] == 'squad': pointer = getattr(pointer, 'classifier') pt_name.append('classifier') elif scope_names[0] == 'transform': pointer = getattr(pointer, 'transform') pt_name.append('transform') if 'bias' in name or 'kernel' in name: pointer = getattr(pointer, 'dense') pt_name.append('dense') elif 'beta' in name or 'gamma' in name: pointer = getattr(pointer, 'LayerNorm') pt_name.append('LayerNorm') else: try: pointer = getattr(pointer, scope_names[0]) pt_name.append(f'{scope_names[0]}') except AttributeError: logger.info(f'Skipping {m_name}') continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] pt_name.append(f'{num}') if m_name[-11:] == '_embeddings' or m_name == 'embeddings': pointer = getattr(pointer, 'weight') pt_name.append('weight') elif m_name == 
'kernel': array = np.transpose(array) try: if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape): if txt_name.endswith('attention/self/key/kernel') or txt_name.endswith('attention/self/query/kernel') or txt_name.endswith('attention/self/value/kernel'): array = array.transpose(1, 0, 2).reshape(pointer.shape) elif txt_name.endswith('attention/output/dense/kernel'): array = array.transpose(0, 2, 1).reshape(pointer.shape) else: array = array.reshape(pointer.shape) if pointer.shape != array.shape: raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}.') except ValueError as e: e.args += (pointer.shape, array.shape) raise pt_weight_name = '.'.join(pt_name) logger.info(f'Initialize PyTorch weight {pt_weight_name} from {txt_name}.') pointer.data = torch.from_numpy(array) tf_weights.pop(txt_name, None) pt_names.remove(pt_weight_name) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.") logger.info(f"Weights not initialized in PyTorch model: {', '.join(pt_names)}.") return model class BigBirdEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) self.rescale_embeddings = config.rescale_embeddings self.hidden_size = config.hidden_size def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if self.rescale_embeddings: inputs_embeds = inputs_embeds * self.hidden_size ** 0.5 token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.dropout(embeddings) embeddings = self.LayerNorm(embeddings) return embeddings class BigBirdSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') 
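# --- Minimal standalone sketch (not part of the library code above) of the multi-head reshape done by
# --- transpose_for_scores and the scaled dot-product scores computed in BigBirdSelfAttention.forward.
# --- The sizes used here (hidden_size=768, num_attention_heads=12, head_size=64) are illustrative assumptions.
import torch

def _split_heads_sketch(x: torch.Tensor, num_heads: int, head_size: int) -> torch.Tensor:
    # (batch, seq, hidden) -> (batch, num_heads, seq, head_size)
    batch, seq, _ = x.shape
    return x.view(batch, seq, num_heads, head_size).permute(0, 2, 1, 3)

_hidden = torch.randn(2, 16, 768)                                # batch=2, seq_len=16, hidden_size=768
_q = _split_heads_sketch(_hidden, num_heads=12, head_size=64)    # -> torch.Size([2, 12, 16, 64])
_k = _split_heads_sketch(_hidden, num_heads=12, head_size=64)
_scores = torch.matmul(_q, _k.transpose(-1, -2)) / 64 ** 0.5     # per-head scores: (2, 12, 16, 16)
_probs = torch.softmax(_scores, dim=-1)                          # each row sums to 1, as in the full-attention path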
self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BigBirdBlockSparseAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.max_seqlen = config.max_position_embeddings self.seed = seed if config.hidden_size % config.num_attention_heads != 0: raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.') self.num_attention_heads = config.num_attention_heads self.num_random_blocks = config.num_random_blocks self.block_size = config.block_size self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, 
self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions=None): (batch_size, seqlen, _) = hidden_states.size() to_seq_length = from_seq_length = seqlen from_block_size = to_block_size = self.block_size if from_seq_length % from_block_size != 0: raise ValueError('Query sided sequence length must be multiple of block size') if to_seq_length % to_block_size != 0: raise ValueError('Key/Value sided sequence length must be multiple of block size') query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) (context_layer, attention_probs) = self.bigbird_block_sparse_attention(query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, self.num_attention_heads, self.num_random_blocks, self.attention_head_size, from_block_size, to_block_size, batch_size, from_seq_length, to_seq_length, seed=self.seed, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=output_attentions) context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs @staticmethod def torch_bmm_nd(inp_1, inp_2, ndim=None): return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(inp_1.shape[:ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])) @staticmethod def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None): return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)).view(inp_1.shape[:ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2])) def bigbird_block_sparse_attention(self, query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, n_heads, n_rand_blocks, attention_head_size, from_block_size, to_block_size, batch_size, from_seq_len, to_seq_len, seed, plan_from_length, plan_num_rand_blocks, output_attentions): if from_seq_len // from_block_size != to_seq_len // to_block_size: raise ValueError('Error the number of blocks needs to be same!') rsqrt_d = 1 / math.sqrt(attention_head_size) bsz = batch_size attn_mask_penalty = -10000.0 np.random.seed(seed) if from_seq_len in [1024, 3072, 4096]: rand_attn = [self._bigbird_block_rand_mask(self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024)[:from_seq_len // from_block_size - 2] for _ in range(n_heads)] else: if plan_from_length is None: (plan_from_length, plan_num_rand_blocks) = self._get_rand_attn_plan(from_seq_len, from_block_size, n_rand_blocks) rand_attn = self._bigbird_block_rand_mask_with_head(from_seq_length=from_seq_len, to_seq_length=to_seq_len, from_block_size=from_block_size, to_block_size=to_block_size, num_heads=n_heads, plan_from_length=plan_from_length, plan_num_rand_blocks=plan_num_rand_blocks) rand_attn = np.stack(rand_attn, axis=0) rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long) rand_attn.unsqueeze_(0) rand_attn = torch.cat([rand_attn for _ in 
range(batch_size)], dim=0) rand_mask = self._create_rand_mask_from_inputs(from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size) blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1) blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn) gathered_key = gathered_key.view(bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1) gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn) gathered_value = gathered_value.view(bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1) first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4) first_product = first_product * rsqrt_d first_product += (1.0 - to_mask) * attn_mask_penalty first_attn_weights = nn.functional.softmax(first_product, dim=-1) first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4) first_context_layer.unsqueeze_(2) second_key_mat = torch.cat([blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0]], dim=2) second_value_mat = torch.cat([blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0]], dim=2) second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4) second_seq_pad = torch.cat([to_mask[:, :, :, :3 * to_block_size], to_mask[:, :, :, -to_block_size:], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size])], dim=3) second_rand_pad = torch.cat([rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, 0]], dim=3) second_product = second_product * rsqrt_d second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty second_attn_weights = nn.functional.softmax(second_product, dim=-1) second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4) second_context_layer.unsqueeze_(2) exp_blocked_key_matrix = torch.cat([blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3) exp_blocked_value_matrix = torch.cat([blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]], dim=3) middle_query_matrix = blocked_query_matrix[:, :, 2:-2] inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5) inner_band_product = inner_band_product * rsqrt_d rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5) rand_band_product = rand_band_product * rsqrt_d first_band_product = torch.einsum('bhlqd,bhkd->bhlqk', middle_query_matrix, blocked_key_matrix[:, :, 0]) first_band_product = first_band_product * rsqrt_d last_band_product = torch.einsum('bhlqd,bhkd->bhlqk', middle_query_matrix, blocked_key_matrix[:, :, -1]) last_band_product = last_band_product * rsqrt_d inner_band_product += (1.0 - band_mask) * attn_mask_penalty first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty rand_band_product += (1.0 - 
rand_mask[:, :, 1:-1]) * attn_mask_penalty band_product = torch.cat([first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1) attn_weights = nn.functional.softmax(band_product, dim=-1) context_layer = self.torch_bmm_nd(attn_weights[:, :, :, :, to_block_size:4 * to_block_size], exp_blocked_value_matrix, ndim=5) context_layer += self.torch_bmm_nd(attn_weights[:, :, :, :, 4 * to_block_size:-to_block_size], gathered_value[:, :, 1:-1], ndim=5) context_layer += torch.einsum('bhlqk,bhkd->bhlqd', attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]) context_layer += torch.einsum('bhlqk,bhkd->bhlqd', attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]) second_last_key_mat = torch.cat([blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1]], dim=2) second_last_value_mat = torch.cat([blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1]], dim=2) second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4) second_last_seq_pad = torch.cat([to_mask[:, :, :, :to_block_size], to_mask[:, :, :, -3 * to_block_size:], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size])], dim=3) second_last_rand_pad = torch.cat([rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, -1]], dim=3) second_last_product = second_last_product * rsqrt_d second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty second_last_attn_weights = nn.functional.softmax(second_last_product, dim=-1) second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4) second_last_context_layer.unsqueeze_(2) last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4) last_product = last_product * rsqrt_d last_product += (1.0 - to_mask) * attn_mask_penalty last_attn_weights = nn.functional.softmax(last_product, dim=-1) last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4) last_context_layer.unsqueeze_(2) context_layer = torch.cat([first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer], dim=2) context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask context_layer = torch.transpose(context_layer, 1, 2) if output_attentions: attention_probs = torch.zeros(bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device) attention_probs[:, :, :from_block_size, :] = first_attn_weights attention_probs[:, :, from_block_size:2 * from_block_size, :3 * to_block_size] = second_attn_weights[:, :, :, :3 * to_block_size] attention_probs[:, :, from_block_size:2 * from_block_size, -to_block_size:] = second_attn_weights[:, :, :, 3 * to_block_size:4 * to_block_size] for (p1, i1, w1) in zip(range(bsz), rand_attn, second_attn_weights): for (p2, i2, w2) in zip(range(n_heads), i1, w1): attn_probs_view = attention_probs.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size) right_slice = w2[:, 4 * to_block_size:] attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(from_block_size, n_rand_blocks, to_block_size) for q_idx in range(from_seq_len // from_block_size - 4): attn_probs_view = attention_probs.view(bsz, n_heads, from_seq_len // 
from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size)[:, :, 2:-2, :, 1:-1, :] right_slice = attn_weights[:, :, q_idx, :, to_block_size:4 * to_block_size] attn_probs_view[:, :, q_idx, :, q_idx:q_idx + 3, :] = right_slice.view(bsz, n_heads, from_block_size, 3, to_block_size) attention_probs[:, :, 2 * from_block_size:-2 * from_block_size, :to_block_size] = attn_weights[:, :, :, :, :to_block_size].view(bsz, n_heads, -1, to_block_size) attention_probs[:, :, 2 * from_block_size:-2 * from_block_size, -to_block_size:] = attn_weights[:, :, :, :, -to_block_size:].view(bsz, n_heads, -1, to_block_size) for (p1, i1, w1) in zip(range(bsz), rand_attn, attn_weights): for (p2, i2, w2) in zip(range(n_heads), i1, w1): for q_idx in range(1, len(i2) - 1): attn_probs_view = attention_probs.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size) right_slice = w2[q_idx - 1, :, 4 * to_block_size:-to_block_size] attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(from_block_size, n_rand_blocks, to_block_size) attention_probs[:, :, -2 * from_block_size:-from_block_size, :to_block_size] = second_last_attn_weights[:, :, :, :to_block_size] attention_probs[:, :, -2 * from_block_size:-from_block_size, -3 * to_block_size:] = second_last_attn_weights[:, :, :, to_block_size:4 * to_block_size] for (p1, i1, w1) in zip(range(bsz), rand_attn, second_last_attn_weights): for (p2, i2, w2) in zip(range(n_heads), i1, w1): attn_probs_view = attention_probs.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size) right_slice = w2[:, 4 * to_block_size:] attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(from_block_size, n_rand_blocks, to_block_size) attention_probs[:, :, -from_block_size:, :] = last_attn_weights else: attention_probs = None return (context_layer, attention_probs) @staticmethod def torch_gather_b2(params, indices): if params.shape[:2] != indices.shape[:2]: raise ValueError(f'Make sure that the first two dimensions of params and indices are identical, but they are params: {params.shape[:2]} vs. 
indices: {indices.shape[:2]}') num_indices_to_gather = indices.shape[-2] * indices.shape[-1] num_indices_to_pick_from = params.shape[2] shift = torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device) indices_shift = torch.div(shift, num_indices_to_gather, rounding_mode='floor') * num_indices_to_pick_from flattened_indices = indices.view(-1) + indices_shift flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1]) out_flattened = flattened_params.index_select(0, flattened_indices) out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:]) return out @staticmethod def _create_rand_mask_from_inputs(from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size): num_windows = from_seq_length // from_block_size - 2 rand_mask = torch.stack([p1[i1.flatten()] for (p1, i1) in zip(to_blocked_mask, rand_attn)]) rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size) rand_mask = torch.einsum('blq,bhlk->bhlqk', from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): plan_from_length = [] plan_num_rand_blocks = [] if 2 * num_rand_blocks + 5 < from_seq_length // from_block_size: plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif num_rand_blocks + 5 < from_seq_length // from_block_size: plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - num_rand_blocks // 2) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return (plan_from_length, plan_num_rand_blocks) def _bigbird_block_rand_mask(self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1): if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError('Error the number of blocks needs to be same!') rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32) if not self.training: return rand_attn middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32) last = to_seq_length // to_block_size - 1 if last_idx > 2 * to_block_size: last = last_idx // to_block_size - 1 r = num_rand_blocks for i in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r] elif i == 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r] elif i == from_seq_length // from_block_size - 3: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] elif i == from_seq_length // from_block_size - 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] elif start > last: start = last rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] elif end + 1 == last: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] else: rand_attn[i - 1, :] = np.random.permutation(np.concatenate((middle_seq[:start], middle_seq[end + 1:last])))[:r] return rand_attn def _bigbird_block_rand_mask_with_head(self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, 
plan_num_rand_blocks, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1): if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError('Error the number of blocks needs to be same!') if from_seq_length not in plan_from_length: raise ValueError('Error from sequence length not in plan!') num_blocks = from_seq_length // from_block_size plan_block_length = np.array(plan_from_length) // from_block_size max_plan_idx = plan_from_length.index(from_seq_length) rand_attn = [np.zeros((num_blocks, np.sum(plan_num_rand_blocks[:max_plan_idx + 1])), dtype=np.int32) for i in range(num_heads)] if not self.training: for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top:num_blocks - global_block_bottom, :] return rand_attn for plan_idx in range(max_plan_idx + 1): rnd_r_cnt = 0 if plan_idx > 0: if plan_num_rand_blocks[plan_idx] > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) curr_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx + 1])) for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right) for pl_id in range(plan_idx): if plan_num_rand_blocks[pl_id] == 0: continue for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]): rnd_r_cnt = 0 to_start_block_id = 0 if pl_id > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id])) to_start_block_id = plan_block_length[pl_id - 1] curr_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id + 1])) for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right) if plan_num_rand_blocks[plan_idx] == 0: continue curr_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx + 1])) from_start_block_id = global_block_top to_start_block_id = 0 if plan_idx > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) from_start_block_id = plan_block_length[plan_idx - 1] to_start_block_id = plan_block_length[plan_idx - 1] for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right) for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top:num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention(block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1): to_block_list = 
np.arange(to_start_block_id, to_end_block_id, dtype=np.int32) perm_block = np.random.permutation(to_block_list) illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) if block_id == 1: illegal_blocks.append(to_end_block_id - 2) if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blokcs = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blokcs.append(perm_block[i]) if len(selected_random_blokcs) == num_rand_blocks: break return np.array(selected_random_blokcs, dtype=np.int32) class BigBirdSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BigBirdAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.attention_type = config.attention_type self.config = config self.seed = seed if self.config.attention_type == 'original_full': self.self = BigBirdSelfAttention(config) elif self.config.attention_type == 'block_sparse': self.self = BigBirdBlockSparseAttention(config, seed) else: raise ValueError(f'attention_type can either be original_full or block_sparse, but is {self.config.attention_type}') self.output = BigBirdSelfOutput(config) def set_attention_type(self, value: str): if value not in ['original_full', 'block_sparse']: raise ValueError(f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}") if value == self.attention_type: return self.attention_type = value if value == 'original_full': attn_weights = BigBirdSelfAttention(self.config) else: attn_weights = BigBirdBlockSparseAttention(self.config, self.seed) attn_weights.query = self.self.query attn_weights.value = self.self.value attn_weights.key = self.self.key self.self = attn_weights self.attention_type = value if not self.training: self.self.eval() def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None): if band_mask is not None: band_mask = band_mask.to(hidden_states.dtype) if from_mask is not None: from_mask = from_mask.to(hidden_states.dtype) if to_mask is not None: to_mask = to_mask.to(hidden_states.dtype) if self.attention_type == 'original_full': self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: if encoder_hidden_states is not None: raise ValueError("BigBird cannot be used as a decoder when config.attention_type != 'original_full'") self_outputs = self.self(hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class BigBirdIntermediate(nn.Module): def 
__init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BigBirdOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BigBirdLayer(nn.Module): def __init__(self, config, seed=None): super().__init__() self.config = config self.attention_type = config.attention_type self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BigBirdAttention(config, seed=seed) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise TypeError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = BigBirdAttention(config) self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) def set_attention_type(self, value: str): if value not in ['original_full', 'block_sparse']: raise ValueError(f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}") if value == self.attention_type: return self.attention_type = value self.attention.set_attention_type(value) if self.add_cross_attention: self.crossattention.set_attention_type(value) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, band_mask=None, from_mask=None, to_mask=None, blocked_encoder_mask=None, past_key_value=None, output_attentions=False): self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=blocked_encoder_mask, to_blocked_mask=blocked_encoder_mask) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] 
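# --- Minimal sketch of the chunked feed-forward idea that BigBirdLayer uses via apply_chunking_to_forward.
# --- Assumed behaviour for illustration: split the sequence dimension into chunks of
# --- config.chunk_size_feed_forward, run the position-wise MLP per chunk, then concatenate; a chunk size of 0
# --- disables chunking. This is a hedged standalone example, not the library helper itself.
import torch

def _chunked_ffn_sketch(ffn, hidden: torch.Tensor, chunk_size: int, seq_dim: int = 1) -> torch.Tensor:
    if chunk_size == 0:
        return ffn(hidden)                                  # no chunking: single pass over the full sequence
    chunks = hidden.split(chunk_size, dim=seq_dim)          # trades peak activation memory for extra passes
    return torch.cat([ffn(chunk) for chunk in chunks], dim=seq_dim)

_mlp = torch.nn.Sequential(torch.nn.Linear(8, 32), torch.nn.GELU(), torch.nn.Linear(32, 8))
_hidden_states = torch.randn(2, 16, 8)
# Because the MLP is applied position-wise, chunking does not change the result:
assert torch.allclose(_chunked_ffn_sketch(_mlp, _hidden_states, chunk_size=4), _mlp(_hidden_states), atol=1e-6)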
outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BigBirdEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.attention_type = config.attention_type self.layer = nn.ModuleList([BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def set_attention_type(self, value: str): if value not in ['original_full', 'block_sparse']: raise ValueError(f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}") if value == self.attention_type: return self.attention_type = value for layer in self.layer: layer.set_attention_type(value) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, band_mask=None, from_mask=None, to_mask=None, blocked_encoder_mask=None, return_dict=True) -> Union[BaseModelOutputWithPastAndCrossAttentions, Tuple]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class BigBirdPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BigBirdLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BigBirdPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BigBirdOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class BigBirdOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BigBirdPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): 
prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return (prediction_scores, seq_relationship_score) class BigBirdPreTrainedModel(PreTrainedModel): config_class = BigBirdConfig load_tf_weights = load_tf_weights_in_big_bird base_model_prefix = 'bert' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) BIG_BIRD_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`BigBirdConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BIG_BIRD_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @dataclass class BigBirdForPreTrainingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None seq_relationship_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BigBirdForQuestionAnsweringModelOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @add_start_docstrings('The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.', BIG_BIRD_START_DOCSTRING) class BigBirdModel(BigBirdPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.attention_type = self.config.attention_type self.config = config self.block_size = self.config.block_size self.embeddings = BigBirdEmbeddings(config) self.encoder = BigBirdEncoder(config) if add_pooling_layer: self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() else: self.pooler = None self.activation = None if self.attention_type != 'original_full' and config.add_cross_attention: logger.warning('When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. 
Setting `attention_type=original_full`') self.set_attention_type('original_full') self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def set_attention_type(self, value: str): if value not in ['original_full', 'block_sparse']: raise ValueError(f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}") if value == self.attention_type: return self.attention_type = value self.encoder.set_attention_type(value) @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, Tuple[torch.FloatTensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size if self.attention_type == 'block_sparse' and seq_length <= max_tokens_to_attend: sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) logger.warning(f"Attention type 'block_sparse' is not possible if sequence_length: {sequence_length} <= num global tokens: 2 * config.block_size + min. 
num sliding tokens: 3 * config.block_size + config.num_random_blocks * config.block_size + additional buffer: config.num_random_blocks * config.block_size = {max_tokens_to_attend} with config.block_size = {self.config.block_size}, config.num_random_blocks = {self.config.num_random_blocks}. Changing attention type to 'original_full'...") self.set_attention_type('original_full') if self.attention_type == 'block_sparse': (padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds) = self._pad_to_block_size(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id) else: padding_len = 0 if self.attention_type == 'block_sparse': (blocked_encoder_mask, band_mask, from_mask, to_mask) = self.create_masks_for_block_sparse_attn(attention_mask, self.block_size) extended_attention_mask = None elif self.attention_type == 'original_full': blocked_encoder_mask = None band_mask = None from_mask = None to_mask = None extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) else: raise ValueError(f'attention_type can either be original_full or block_sparse, but is {self.attention_type}') if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, blocked_encoder_mask=blocked_encoder_mask, return_dict=return_dict) sequence_output = encoder_outputs[0] pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if self.pooler is not None else None if padding_len > 0: sequence_output = sequence_output[:, :-padding_len] if not return_dict: return (sequence_output, pooler_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooler_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @staticmethod def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int): (batch_size, seq_length) = attention_mask.size() if seq_length % block_size != 0: raise ValueError(f'Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}.') def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask): exp_blocked_to_pad = torch.cat([to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], 
to_blocked_mask[:, 3:-1]], dim=2) band_mask = torch.einsum('blq,blk->blqk', from_blocked_mask[:, 2:-2], exp_blocked_to_pad) band_mask.unsqueeze_(1) return band_mask blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) from_mask = attention_mask.view(batch_size, 1, seq_length, 1) to_mask = attention_mask.view(batch_size, 1, 1, seq_length) return (blocked_encoder_mask, band_mask, from_mask, to_mask) def _pad_to_block_size(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor, position_ids: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int): block_size = self.config.block_size input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape (batch_size, seq_len) = input_shape[:2] padding_len = (block_size - seq_len % block_size) % block_size if padding_len > 0: logger.warning_once(f'Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of `config.block_size`: {block_size}') if input_ids is not None: input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id) if position_ids is not None: position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id) if inputs_embeds is not None: input_ids_padding = inputs_embeds.new_full((batch_size, padding_len), self.config.pad_token_id, dtype=torch.long) inputs_embeds_padding = self.embeddings(input_ids_padding) inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) attention_mask = nn.functional.pad(attention_mask, (0, padding_len), value=False) token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) return (padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds) class BigBirdForPreTraining(BigBirdPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias'] def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config, add_pooling_layer=True) self.cls = BigBirdPreTrainingHeads(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.FloatTensor]=None, next_sentence_label: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[BigBirdForPreTrainingOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) (sequence_output, pooled_output) = 
outputs[:2] (prediction_scores, seq_relationship_score) = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None: loss_fct = CrossEntropyLoss() total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if next_sentence_label is not None and total_loss is not None: next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = total_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return BigBirdForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('BigBird Model with a `language modeling` head on top.', BIG_BIRD_START_DOCSTRING) class BigBirdForMaskedLM(BigBirdPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias'] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning('If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.') self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] if self.config.pad_token_id is None: raise 
ValueError('The PAD token should be defined for generation') attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full((effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {'input_ids': input_ids, 'attention_mask': attention_mask} @add_start_docstrings('BigBird Model with a `language modeling` head on top for CLM fine-tuning.', BIG_BIRD_START_DOCSTRING) class BigBirdForCausalLM(BigBirdPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias'] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning('If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`') self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[CausalLMOutputWithCrossAttentions, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) 
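# --- Illustrative aside (not part of the library code above/below) ---
# The `prepare_inputs_for_generation` logic continuing below trims `input_ids` once
# `past_key_values` holds a cache for already-processed tokens: only the not-yet-cached
# suffix (normally just the newest token) is passed to the model on the next step.
# A minimal, self-contained sketch of that trimming rule; the helper name and the bare
# `past_length` integer are hypothetical stand-ins for the real cache object.
import torch

def trim_input_ids_for_cached_generation(input_ids: torch.Tensor, past_length: int) -> torch.Tensor:
    if input_ids.shape[1] > past_length:
        # Drop the prefix that the cache already covers.
        remove_prefix_length = past_length
    else:
        # Fallback mirroring the surrounding method: keep only the final token.
        remove_prefix_length = input_ids.shape[1] - 1
    return input_ids[:, remove_prefix_length:]

# Usage: with 5 tokens cached, only the 6th token is fed on the next decoding step.
# trim_input_ids_for_cached_generation(torch.arange(6).unsqueeze(0), past_length=5)  # -> tensor([[5]])
# --- end aside ---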
if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])) + layer_past[2:],) return reordered_past class BigBirdClassificationHead(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings('\n BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ', BIG_BIRD_START_DOCSTRING) class BigBirdForSequenceClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = BigBirdModel(config) self.classifier = BigBirdClassificationHead(config) self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type 
== 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n ', BIG_BIRD_START_DOCSTRING) class BigBirdForMultipleChoice(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MultipleChoiceModelOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', BIG_BIRD_START_DOCSTRING) class BigBirdForTokenClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BigBirdModel(config) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class BigBirdForQuestionAnsweringHead(nn.Module): def __init__(self, config): super().__init__() self.dropout = nn.Dropout(config.hidden_dropout_prob) self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_output): hidden_states = self.dropout(encoder_output) hidden_states = self.intermediate(hidden_states) hidden_states = self.output(hidden_states, encoder_output) hidden_states = self.qa_outputs(hidden_states) return hidden_states @add_start_docstrings('\n BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', BIG_BIRD_START_DOCSTRING) class BigBirdForQuestionAnswering(BigBirdPreTrainedModel): def __init__(self, config, add_pooling_layer=False): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.sep_token_id = config.sep_token_id self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer) self.qa_classifier = BigBirdForQuestionAnsweringHead(config) self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=BigBirdForQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: 
Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, question_lengths: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[BigBirdForQuestionAnsweringModelOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) if question_lengths is None and input_ids is not None: question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1 question_lengths.unsqueeze_(1) logits_mask = None if question_lengths is not None: logits_mask = self.prepare_question_mask(question_lengths, seqlen) if token_type_ids is None: token_type_ids = torch.ones(logits_mask.size(), dtype=int, device=logits_mask.device) - logits_mask logits_mask = logits_mask logits_mask[:, 0] = False logits_mask.unsqueeze_(2) outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_classifier(sequence_output) if logits_mask is not None: logits = logits - logits_mask * 1000000.0 (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return BigBirdForQuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @staticmethod def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int): mask = torch.arange(0, maxlen).to(q_lengths.device) mask.unsqueeze_(0) mask = torch.where(mask < q_lengths, 1, 0) return mask # File: transformers-main/src/transformers/models/big_bird/modeling_flax_big_bird.py from typing import Callable, Optional, Tuple import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import 
FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxBaseModelOutputWithPooling, FlaxBaseModelOutputWithPoolingAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxMaskedLMOutput, FlaxMultipleChoiceModelOutput, FlaxSequenceClassifierOutput, FlaxTokenClassifierOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_big_bird import BigBirdConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google/bigbird-roberta-base' _CONFIG_FOR_DOC = 'BigBirdConfig' remat = nn_partitioning.remat @flax.struct.dataclass class FlaxBigBirdForPreTrainingOutput(ModelOutput): prediction_logits: jnp.ndarray = None seq_relationship_logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxBigBirdForQuestionAnsweringModelOutput(ModelOutput): start_logits: jnp.ndarray = None end_logits: jnp.ndarray = None pooled_output: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None BIG_BIRD_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading, saving and converting weights from PyTorch models)\n\n This model is also a\n [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as\n a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and\n behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`BigBirdConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' BIG_BIRD_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`numpy.ndarray` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n head_mask (`numpy.ndarray` of shape `({0})`, `optional):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\n' class FlaxBigBirdEmbeddings(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.word_embeddings = nn.Embed(self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype) self.position_embeddings = nn.Embed(self.config.max_position_embeddings, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype) self.token_type_embeddings = nn.Embed(self.config.type_vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool=True): inputs_embeds = self.word_embeddings(input_ids.astype('i4')) position_embeds = self.position_embeddings(position_ids.astype('i4')) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype('i4')) if self.config.rescale_embeddings: inputs_embeds *= self.config.hidden_size ** 0.5 hidden_states = inputs_embeds + token_type_embeddings + position_embeds hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states) return hidden_states class FlaxBigBirdSelfAttention(nn.Module): config: BigBirdConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.head_dim = self.config.hidden_size // self.config.num_attention_heads if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError('`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` : {self.config.num_attention_heads}') self.query = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.key = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.value = nn.Dense(self.config.hidden_size, dtype=self.dtype, 
kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) if self.causal: self.causal_mask = make_causal_mask(jnp.ones((1, self.config.max_position_embeddings), dtype='bool'), dtype='bool') def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') cached_key = self.variable('cache', 'cached_key', jnp.zeros, key.shape, key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape, value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, dtype=jnp.int32)) if is_initialized: (*batch_dims, max_length, num_heads, depth_per_head) = cached_key.value.shape cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors pad_mask = jnp.broadcast_to(jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) def __call__(self, hidden_states, attention_mask, layer_head_mask, key_value_states: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic=True, output_attentions: bool=False): is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] query_states = self.query(hidden_states) if is_cross_attention: key_states = self.key(key_value_states) value_states = self.value(key_value_states) else: key_states = self.key(hidden_states) value_states = self.value(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) if self.causal: (query_length, key_length) = (query_states.shape[1], key_states.shape[1]) if self.has_variable('cache', 'cached_key'): mask_shift = self.variables['cache']['cache_index'] max_decoder_length = self.variables['cache']['cached_key'].shape[1] causal_mask = lax.dynamic_slice(self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) if self.causal and (self.has_variable('cache', 'cached_key') or init_cache): (key_states, value_states, attention_mask) = self._concatenate_to_cache(key_states, value_states, query_states, attention_mask) if attention_mask is not None: attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) else: attention_bias = 
None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng('dropout') attn_weights = dot_product_attention_weights(query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None) if layer_head_mask is not None: attn_weights = jnp.einsum('...hqk,h->...hqk', attn_weights, layer_head_mask) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxBigBirdBlockSparseAttention(nn.Module): config: BigBirdConfig block_sparse_seed: int = None dtype: jnp.dtype = jnp.float32 def setup(self): self.query = nn.Dense(self.config.hidden_size, dtype=self.dtype, use_bias=self.config.use_bias, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.key = nn.Dense(self.config.hidden_size, dtype=self.dtype, use_bias=self.config.use_bias, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.value = nn.Dense(self.config.hidden_size, dtype=self.dtype, use_bias=self.config.use_bias, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) @staticmethod def transpose_for_scores(x, n_heads, head_size): new_x_shape = x.shape[:-1] + (n_heads, head_size) x = x.reshape(*new_x_shape) return jnp.transpose(x, axes=(0, 2, 1, 3)) def __call__(self, hidden_states, attention_mask, deterministic=True, output_attentions=False): n_heads = self.config.num_attention_heads head_size = self.config.hidden_size // n_heads (blocked_encoder_mask, band_mask, from_mask, to_mask) = self.create_masks_for_block_sparse_attn(attention_mask, self.config.block_size) query_layer = self.transpose_for_scores(self.query(hidden_states), n_heads, head_size) key_layer = self.transpose_for_scores(self.key(hidden_states), n_heads, head_size) value_layer = self.transpose_for_scores(self.value(hidden_states), n_heads, head_size) indices_prng_key = None if not deterministic: indices_prng_key = self.make_rng('indices') (attn_output, attn_weights) = self.bigbird_block_sparse_attention(query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, blocked_encoder_mask, blocked_encoder_mask, n_heads, head_size, indices_prng_key=indices_prng_key, deterministic=deterministic, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=output_attentions) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs @staticmethod def create_masks_for_block_sparse_attn(attention_mask, block_size: int): (batch_size, seq_length) = attention_mask.shape if seq_length % block_size != 0: raise ValueError(f'Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}.') def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask): exp_blocked_to_pad = jnp.concatenate([to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], axis=2) band_mask = jnp.einsum('blq,blk->blqk', from_blocked_mask[:, 2:-2], exp_blocked_to_pad) band_mask = jnp.expand_dims(band_mask, 1) return band_mask blocked_encoder_mask = attention_mask.reshape(batch_size, seq_length // block_size, block_size) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) from_mask = 
attention_mask.reshape(batch_size, 1, seq_length, 1) to_mask = attention_mask.reshape(batch_size, 1, 1, seq_length) return (blocked_encoder_mask, band_mask, from_mask, to_mask) def bigbird_block_sparse_attention(self, query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, n_heads, head_size, indices_prng_key: Optional[jax.random.PRNGKey]=None, deterministic: Optional[bool]=True, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=None): (bsz, _, from_seq_len, _) = query_layer.shape to_seq_len = key_layer.shape[2] from_block_size = to_block_size = self.config.block_size if from_seq_len % from_block_size != 0: raise ValueError('Query sided sequence length must be multiple of block size') if to_seq_len % to_block_size != 0: raise ValueError('Key/Value sided sequence length must be multiple of block size') if from_seq_len // from_block_size != to_seq_len // to_block_size: raise ValueError('Error the number of blocks needs to be same!') n_rand_blocks = self.config.num_random_blocks rsqrt_d = 1 / jnp.sqrt(head_size) attn_mask_penalty = -10000.0 if from_seq_len in [1024, 3072, 4096]: max_seqlen = self.config.max_position_embeddings rand_attn = [self._bigbird_block_rand_mask(max_seqlen, max_seqlen, from_block_size, to_block_size, n_rand_blocks, indices_prng_key=indices_prng_key, deterministic=deterministic, last_idx=1024)[:from_seq_len // from_block_size - 2] for _ in range(n_heads)] else: if plan_from_length is None: (plan_from_length, plan_num_rand_blocks) = self._get_rand_attn_plan(from_seq_len, from_block_size, n_rand_blocks) rand_attn = self._bigbird_block_rand_mask_with_head(from_seq_length=from_seq_len, to_seq_length=to_seq_len, from_block_size=from_block_size, to_block_size=to_block_size, num_heads=n_heads, plan_from_length=plan_from_length, plan_num_rand_blocks=plan_num_rand_blocks, indices_prng_key=indices_prng_key) rand_attn = jnp.stack(rand_attn, axis=0) rand_attn = jnp.broadcast_to(rand_attn, (bsz,) + rand_attn.shape) rand_mask = self._create_rand_mask_from_inputs(from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size) blocked_query_matrix = query_layer.reshape(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1) blocked_key_matrix = key_layer.reshape(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) blocked_value_matrix = value_layer.reshape(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) shape = (bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1) gathered_key = self.jax_gather(blocked_key_matrix, rand_attn, batch_dims=2).reshape(*shape) gathered_value = self.jax_gather(blocked_value_matrix, rand_attn, batch_dims=2).reshape(*shape) first_product = jnp.einsum('bhqd,bhkd->bhqk', blocked_query_matrix[:, :, 0], key_layer) first_product = first_product * rsqrt_d first_product += (1.0 - to_mask) * attn_mask_penalty first_attn_weights = jax.nn.softmax(first_product, axis=-1) first_context_layer = jnp.einsum('bhqk,bhkd->bhqd', first_attn_weights, value_layer) first_context_layer = jnp.expand_dims(first_context_layer, 2) second_key_mat = jnp.concatenate([blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0]], axis=2) second_value_mat = jnp.concatenate([blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0]], axis=2) second_product = 
jnp.einsum('bhqd,bhkd->bhqk', blocked_query_matrix[:, :, 1], second_key_mat) second_seq_pad = jnp.concatenate([to_mask[:, :, :, :3 * to_block_size], to_mask[:, :, :, -to_block_size:], jnp.ones([bsz, 1, 1, n_rand_blocks * to_block_size], dtype=to_mask.dtype)], axis=3) second_rand_pad = jnp.concatenate([jnp.ones([bsz, n_heads, from_block_size, 4 * to_block_size], dtype=rand_mask.dtype), rand_mask[:, :, 0]], axis=3) second_product = second_product * rsqrt_d second_product += (1.0 - jnp.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty second_attn_weights = jax.nn.softmax(second_product, axis=-1) second_context_layer = jnp.einsum('bhqk,bhkd->bhqd', second_attn_weights, second_value_mat) second_context_layer = jnp.expand_dims(second_context_layer, 2) exp_blocked_key_matrix = jnp.concatenate([blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], axis=3) exp_blocked_value_matrix = jnp.concatenate([blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]], axis=3) middle_query_matrix = blocked_query_matrix[:, :, 2:-2] inner_band_product = jnp.einsum('bhlqd,bhlkd->bhlqk', middle_query_matrix, exp_blocked_key_matrix) inner_band_product = inner_band_product * rsqrt_d rand_band_product = jnp.einsum('bhlqd,bhlkd->bhlqk', middle_query_matrix, gathered_key[:, :, 1:-1]) rand_band_product = rand_band_product * rsqrt_d first_band_product = jnp.einsum('bhlqd,bhkd->bhlqk', middle_query_matrix, blocked_key_matrix[:, :, 0]) first_band_product = first_band_product * rsqrt_d last_band_product = jnp.einsum('bhlqd,bhkd->bhlqk', middle_query_matrix, blocked_key_matrix[:, :, -1]) last_band_product = last_band_product * rsqrt_d inner_band_product += (1.0 - band_mask) * attn_mask_penalty first_band_product += (1.0 - jnp.expand_dims(to_mask[:, :, :, :to_block_size], 3)) * attn_mask_penalty last_band_product += (1.0 - jnp.expand_dims(to_mask[:, :, :, -to_block_size:], 3)) * attn_mask_penalty rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty band_product = jnp.concatenate([first_band_product, inner_band_product, rand_band_product, last_band_product], axis=-1) attn_weights = jax.nn.softmax(band_product, axis=-1) context_layer = jnp.einsum('bhlqk,bhlkd->bhlqd', attn_weights[:, :, :, :, to_block_size:4 * to_block_size], exp_blocked_value_matrix) context_layer += jnp.einsum('bhlqk,bhlkd->bhlqd', attn_weights[:, :, :, :, 4 * to_block_size:-to_block_size], gathered_value[:, :, 1:-1]) context_layer += jnp.einsum('bhlqk,bhkd->bhlqd', attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]) context_layer += jnp.einsum('bhlqk,bhkd->bhlqd', attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]) second_last_key_mat = jnp.concatenate([blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1]], axis=2) second_last_value_mat = jnp.concatenate([blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1]], axis=2) second_last_product = jnp.einsum('bhqd,bhkd->bhqk', blocked_query_matrix[:, :, -2], second_last_key_mat) second_last_seq_pad = jnp.concatenate([to_mask[:, :, :, :to_block_size], to_mask[:, :, :, -3 * to_block_size:], jnp.ones([bsz, 1, 1, n_rand_blocks * to_block_size], dtype=to_mask.dtype)], axis=3) second_last_rand_pad = jnp.concatenate([jnp.ones([bsz, n_heads, from_block_size, 4 * to_block_size], 
dtype=rand_mask.dtype), rand_mask[:, :, -1]], axis=3) second_last_product = second_last_product * rsqrt_d second_last_product += (1.0 - jnp.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty second_last_attn_weights = jax.nn.softmax(second_last_product, axis=-1) second_last_context_layer = jnp.einsum('bhqk,bhkd->bhqd', second_last_attn_weights, second_last_value_mat) second_last_context_layer = jnp.expand_dims(second_last_context_layer, 2) last_product = jnp.einsum('bhqd,bhkd->bhqk', blocked_query_matrix[:, :, -1], key_layer) last_product = last_product * rsqrt_d last_product += (1.0 - to_mask) * attn_mask_penalty last_attn_weights = jax.nn.softmax(last_product, axis=-1) last_context_layer = jnp.einsum('bhqk,bhkd->bhqd', last_attn_weights, value_layer) last_context_layer = jnp.expand_dims(last_context_layer, 2) context_layer = jnp.concatenate([first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer], axis=2) context_layer = context_layer.reshape(bsz, n_heads, from_seq_len, -1) * from_mask context_layer = jnp.transpose(context_layer, axes=(0, 2, 1, 3)).reshape(bsz, from_seq_len, -1) attention_probs = None return (context_layer, attention_probs) @staticmethod def jax_gather(params, indices, batch_dims=2): def _jax_gather(params, indices): return params[indices] for _ in range(batch_dims): _jax_gather = jax.vmap(_jax_gather, in_axes=(0, 0)) return _jax_gather(params, indices) def _create_rand_mask_from_inputs(self, from_blocked_mask, to_blocked_mask, broadcasted_rand_attn, num_attention_heads, num_random_blocks, batch_size, from_seq_length, from_block_size): num_windows = from_seq_length // from_block_size - 2 rand_mask = self.jax_gather(to_blocked_mask, broadcasted_rand_attn, batch_dims=1) rand_mask = rand_mask.reshape(batch_size, num_attention_heads, num_windows, num_random_blocks * from_block_size) rand_mask = jnp.einsum('blq,bhlk->bhlqk', from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): plan_from_length = [] plan_num_rand_blocks = [] if 2 * num_rand_blocks + 5 < from_seq_length // from_block_size: plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif num_rand_blocks + 5 < from_seq_length // from_block_size: plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - num_rand_blocks // 2) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return (plan_from_length, plan_num_rand_blocks) @staticmethod def _bigbird_block_rand_mask(from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, indices_prng_key: Optional[jax.random.PRNGKey]=None, deterministic: Optional[bool]=True, last_idx: Optional[int]=-1): if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError('Error the number of blocks needs to be same!') rand_attn = jnp.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=jnp.int32) if deterministic: return rand_attn middle_seq = jnp.arange(1, to_seq_length // to_block_size - 1, dtype=jnp.int32) last = to_seq_length // to_block_size - 1 if last_idx > 2 * to_block_size: last = last_idx // to_block_size - 1 r = num_rand_blocks for i 
in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: seq_values = jax.random.permutation(indices_prng_key, middle_seq[2:last])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) elif i == 2: seq_values = jax.random.permutation(indices_prng_key, middle_seq[3:last])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) elif i == from_seq_length // from_block_size - 3: seq_values = jax.random.permutation(indices_prng_key, middle_seq[:last])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) elif i == from_seq_length // from_block_size - 2: seq_values = jax.random.permutation(indices_prng_key, middle_seq[:last])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) elif start > last: start = last seq_values = jax.random.permutation(indices_prng_key, middle_seq[:start])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) elif end + 1 == last: seq_values = jax.random.permutation(indices_prng_key, middle_seq[:start])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) else: concat_values = jnp.concatenate((middle_seq[:start], middle_seq[end + 1:last])) seq_values = jax.random.permutation(indices_prng_key, concat_values)[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) return rand_attn def _bigbird_block_rand_mask_with_head(self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, plan_num_rand_blocks, indices_prng_key: Optional[jax.random.PRNGKey]=None, deterministic: Optional[bool]=True, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1): if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError('Error the number of blocks needs to be same!') if from_seq_length not in plan_from_length: raise ValueError('Error from sequence length not in plan!') num_blocks = from_seq_length // from_block_size plan_block_length = jnp.array(plan_from_length) // from_block_size max_plan_idx = plan_from_length.index(from_seq_length) rand_attn = [jnp.zeros((num_blocks, sum(plan_num_rand_blocks[:max_plan_idx + 1])), dtype=jnp.int32) for i in range(num_heads)] if deterministic: for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top:num_blocks - global_block_bottom, :] return rand_attn for plan_idx in range(max_plan_idx + 1): rnd_r_cnt = 0 if plan_idx > 0: if plan_num_rand_blocks[plan_idx] > 0: rnd_r_cnt = int(sum(plan_num_rand_blocks[:plan_idx])) curr_r_cnt = int(sum(plan_num_rand_blocks[:plan_idx + 1])) for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]): for h in range(num_heads): single_block_row_attention = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, indices_prng_key=indices_prng_key) rand_attn[h] = rand_attn[h].at[blk_rw_idx, rnd_r_cnt:curr_r_cnt].set(single_block_row_attention) for pl_id in range(plan_idx): if plan_num_rand_blocks[pl_id] == 0: continue for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]): rnd_r_cnt = 0 to_start_block_id = 0 if pl_id > 0: rnd_r_cnt = int(sum(plan_num_rand_blocks[:pl_id])) to_start_block_id = plan_block_length[pl_id - 1] curr_r_cnt = int(sum(plan_num_rand_blocks[:pl_id + 1])) for h in range(num_heads): 
single_block_row_attention = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, indices_prng_key=indices_prng_key) rand_attn[h] = rand_attn[h].at[blk_rw_idx, rnd_r_cnt:curr_r_cnt].set(single_block_row_attention) if plan_num_rand_blocks[plan_idx] == 0: continue curr_r_cnt = int(sum(plan_num_rand_blocks[:plan_idx + 1])) from_start_block_id = global_block_top to_start_block_id = 0 if plan_idx > 0: rnd_r_cnt = int(sum(plan_num_rand_blocks[:plan_idx])) from_start_block_id = plan_block_length[plan_idx - 1] to_start_block_id = plan_block_length[plan_idx - 1] for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]): for h in range(num_heads): single_block_row_attention = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, indices_prng_key=indices_prng_key) rand_attn[h] = rand_attn[h].at[blk_rw_idx, rnd_r_cnt:curr_r_cnt].set(single_block_row_attention) for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top:num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention(block_id, to_start_block_id, to_end_block_id, num_rand_blocks, indices_prng_key: Optional[jax.random.PRNGKey]=None, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1): to_block_list = jnp.arange(to_start_block_id, to_end_block_id, dtype=jnp.int32) perm_block = jax.random.permutation(indices_prng_key, to_block_list) illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) if block_id == 1: illegal_blocks.append(to_end_block_id - 2) if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blocks = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blocks.append(perm_block[i]) if len(selected_random_blocks) == num_rand_blocks: break return jnp.array(selected_random_blocks, dtype=jnp.int32) class FlaxBigBirdSelfOutput(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool=True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class FlaxBigBirdAttention(nn.Module): config: BigBirdConfig layer_id: int = None causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): if self.config.attention_type == 'original_full': self.self = FlaxBigBirdSelfAttention(self.config, causal=self.causal, 
dtype=self.dtype) elif self.config.attention_type == 'block_sparse': self.self = FlaxBigBirdBlockSparseAttention(self.config, block_sparse_seed=self.layer_id, dtype=self.dtype) else: raise ValueError(f'Your `config.attention_type` is {self.config.attention_type} but it can either be `original_full` or `block_sparse`') self.output = FlaxBigBirdSelfOutput(self.config, dtype=self.dtype) def __call__(self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool=False): if self.config.attention_type == 'original_full': attn_outputs = self.self(hidden_states, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions) else: attn_outputs = self.self(hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs class FlaxBigBirdIntermediate(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxBigBirdOutput(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_output, deterministic: bool=True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + attention_output) return hidden_states class FlaxBigBirdLayer(nn.Module): config: BigBirdConfig layer_id: int = None dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxBigBirdAttention(self.config, layer_id=self.layer_id, causal=self.config.is_decoder, dtype=self.dtype) self.intermediate = FlaxBigBirdIntermediate(self.config, dtype=self.dtype) self.output = FlaxBigBirdOutput(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = FlaxBigBirdAttention(self.config, causal=False, dtype=self.dtype) def __call__(self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False): attention_outputs = self.attention(hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions) attention_output = attention_outputs[0] if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention(attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions) attention_output 
= cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs class FlaxBigBirdLayerCollection(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxBigBirdCheckpointLayer = remat(FlaxBigBirdLayer, static_argnums=(5, 6, 7)) self.layers = [FlaxBigBirdCheckpointLayer(self.config, layer_id=i, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] else: self.layers = [FlaxBigBirdLayer(self.config, layer_id=i, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] def __call__(self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None if head_mask is not None: if head_mask.shape[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.shape[0]}.') for (i, layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer(hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, encoder_hidden_states, encoder_attention_mask, init_cache, deterministic, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) class FlaxBigBirdEncoder(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.layer = FlaxBigBirdLayerCollection(self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) def __call__(self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return self.layer(hidden_states, attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class FlaxBigBirdPredictionHeadTransform(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype) 
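# --- Illustrative aside (not part of the library code above/below) ---
# The `FlaxBigBirdPredictionHeadTransform.setup` being defined around this point wires up a
# Dense projection, the configured activation, and a LayerNorm that are applied in that order
# before the vocabulary decoder. A minimal standalone Flax sketch of the same ordering; the
# module name, the fixed toy hidden size, and the gelu activation are assumptions for
# illustration (the real module reads all of these from `BigBirdConfig`).
import jax
import jax.numpy as jnp
import flax.linen as linen

class TinyPredictionHeadTransform(linen.Module):
    hidden_size: int = 8  # assumed toy size; the real head uses config.hidden_size

    @linen.compact
    def __call__(self, hidden_states):
        hidden_states = linen.Dense(self.hidden_size)(hidden_states)  # dense projection
        hidden_states = jax.nn.gelu(hidden_states)                    # activation (ACT2FN[config.hidden_act] upstream)
        return linen.LayerNorm()(hidden_states)                       # final layer norm

# Usage: initialize and apply on a dummy (batch, seq, hidden) array.
# x = jnp.ones((1, 4, 8))
# params = TinyPredictionHeadTransform().init(jax.random.PRNGKey(0), x)
# y = TinyPredictionHeadTransform().apply(params, x)  # shape (1, 4, 8)
# --- end aside ---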
self.activation = ACT2FN[self.config.hidden_act] self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return self.LayerNorm(hidden_states) class FlaxBigBirdLMPredictionHead(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros def setup(self): self.transform = FlaxBigBirdPredictionHeadTransform(self.config, dtype=self.dtype) self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False) self.bias = self.param('bias', self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.transform(hidden_states) if shared_embedding is not None: hidden_states = self.decoder.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) bias = jnp.asarray(self.bias, self.dtype) hidden_states += bias return hidden_states class FlaxBigBirdOnlyMLMHead(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.predictions = FlaxBigBirdLMPredictionHead(self.config, dtype=self.dtype) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding) return hidden_states class FlaxBigBirdPreTrainingHeads(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.predictions = FlaxBigBirdLMPredictionHead(self.config, dtype=self.dtype) self.seq_relationship = nn.Dense(2, dtype=self.dtype) def __call__(self, hidden_states, pooled_output, shared_embedding=None): prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding) seq_relationship_score = self.seq_relationship(pooled_output) return (prediction_scores, seq_relationship_score) class FlaxBigBirdPreTrainedModel(FlaxPreTrainedModel): config_class = BigBirdConfig base_model_prefix = 'bert' module_class: nn.Module = None def __init__(self, config: BigBirdConfig, input_shape: Optional[tuple]=None, seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, gradient_checkpointing: bool=False, **kwargs): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) if config.attention_type == 'block_sparse' and input_shape is None: input_shape = (1, 12 * config.block_size) elif input_shape is None: input_shape = (1, 1) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def enable_gradient_checkpointing(self): self._module = self.module_class(config=self.config, dtype=self.dtype, gradient_checkpointing=True) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') token_type_ids = jnp.zeros_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) (params_rng, dropout_rng, indices_rng) = jax.random.split(rng, num=3) rngs = {'params': params_rng, 'dropout': dropout_rng, 'indices': indices_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = 
self.module.init(rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False) else: module_init_outputs = self.module.init(rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False) random_params = module_init_outputs['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): input_ids = jnp.ones((batch_size, max_length), dtype='i4') attention_mask = jnp.ones_like(input_ids, dtype='i4') position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True) return unfreeze(init_variables['cache']) @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, params: dict=None, dropout_rng: Optional[jax.random.PRNGKey]=None, indices_rng: Optional[jax.random.PRNGKey]=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, past_key_values: dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) rngs = {} if indices_rng is not None: rngs['indices'] = indices_rng if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if self.config.add_cross_attention: if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False outputs = self.module.apply(inputs, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), token_type_ids=jnp.array(token_type_ids, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), head_mask=jnp.array(head_mask, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, mutable=mutable) if past_key_values is not None and return_dict: (outputs, past_key_values) = outputs outputs['past_key_values'] = unfreeze(past_key_values['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past_key_values) = outputs outputs = outputs[:1] + (unfreeze(past_key_values['cache']),) + outputs[1:] else: outputs = self.module.apply(inputs, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), 
token_type_ids=jnp.array(token_type_ids, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), head_mask=jnp.array(head_mask, dtype='i4'), deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs) return outputs class FlaxBigBirdModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 add_pooling_layer: bool = True gradient_checkpointing: bool = False def setup(self): self.embeddings = FlaxBigBirdEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxBigBirdEncoder(self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.pooler = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): hidden_states = self.embeddings(input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic) outputs = self.encoder(hidden_states, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] pooled = nn.tanh(self.pooler(hidden_states[:, 0, :])) if self.add_pooling_layer else None if not return_dict: if pooled is None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) @add_start_docstrings('The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.', BIG_BIRD_START_DOCSTRING) class FlaxBigBirdModel(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdModule append_call_sample_docstring(FlaxBigBirdModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC) class FlaxBigBirdForPreTrainingModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = FlaxBigBirdPreTrainingHeads(config=self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if self.config.tie_word_embeddings: shared_embedding = self.bert.variables['params']['embeddings']['word_embeddings']['embedding'] else: shared_embedding = None hidden_states = outputs[0] pooled_output = outputs[1] (prediction_scores, seq_relationship_score) = self.cls(hidden_states, pooled_output, shared_embedding=shared_embedding) if not return_dict: return (prediction_scores, 
seq_relationship_score) + outputs[2:] return FlaxBigBirdForPreTrainingOutput(prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n BigBird Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n ', BIG_BIRD_START_DOCSTRING) class FlaxBigBirdForPreTraining(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdForPreTrainingModule FLAX_BIG_BIRD_FOR_PRETRAINING_DOCSTRING = '\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, FlaxBigBirdForPreTraining\n\n >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")\n >>> model = FlaxBigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base")\n\n >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.prediction_logits\n >>> seq_relationship_logits = outputs.seq_relationship_logits\n ```\n' overwrite_call_docstring(FlaxBigBirdForPreTraining, BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, sequence_length') + FLAX_BIG_BIRD_FOR_PRETRAINING_DOCSTRING) append_replace_return_docstrings(FlaxBigBirdForPreTraining, output_type=FlaxBigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) class FlaxBigBirdForMaskedLMModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule(config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = FlaxBigBirdOnlyMLMHead(config=self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.bert.variables['params']['embeddings']['word_embeddings']['embedding'] else: shared_embedding = None logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxMaskedLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('BigBird Model with a `language modeling` head on top.', BIG_BIRD_START_DOCSTRING) class FlaxBigBirdForMaskedLM(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdForMaskedLMModule append_call_sample_docstring(FlaxBigBirdForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC) class FlaxBigBirdClassificationHead(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype) classifier_dropout = self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, features, deterministic=True): x = features[:, 0, :] x = self.dropout(x, deterministic=deterministic) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = 
self.dropout(x, deterministic=deterministic) x = self.out_proj(x) return x class FlaxBigBirdForSequenceClassificationModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.classifier = FlaxBigBirdClassificationHead(self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output, deterministic=deterministic) if not return_dict: return (logits,) + outputs[2:] return FlaxSequenceClassifierOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ', BIG_BIRD_START_DOCSTRING) class FlaxBigBirdForSequenceClassification(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdForSequenceClassificationModule append_call_sample_docstring(FlaxBigBirdForSequenceClassification, _CHECKPOINT_FOR_DOC, FlaxSequenceClassifierOutput, _CONFIG_FOR_DOC) class FlaxBigBirdForMultipleChoiceModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput(logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', BIG_BIRD_START_DOCSTRING) class FlaxBigBirdForMultipleChoice(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdForMultipleChoiceModule def __init__(self, config: BigBirdConfig, input_shape: Optional[tuple]=None, seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): if config.attention_type == 'block_sparse' and input_shape is None: input_shape = (1, 1, 12 * config.block_size) elif input_shape is None: input_shape = (1, 1) super().__init__(config, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) overwrite_call_docstring(FlaxBigBirdForMultipleChoice, BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) append_call_sample_docstring(FlaxBigBirdForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC) class FlaxBigBirdForTokenClassificationModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule(config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing) classifier_dropout = self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', BIG_BIRD_START_DOCSTRING) class FlaxBigBirdForTokenClassification(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdForTokenClassificationModule append_call_sample_docstring(FlaxBigBirdForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC) class FlaxBigBirdForQuestionAnsweringHead(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.intermediate = FlaxBigBirdIntermediate(self.config, dtype=self.dtype) self.output = FlaxBigBirdOutput(self.config, dtype=self.dtype) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, encoder_output, deterministic=True): hidden_states = self.dropout(encoder_output, deterministic=deterministic) hidden_states = self.intermediate(hidden_states) hidden_states = self.output(hidden_states, encoder_output) hidden_states = self.qa_outputs(hidden_states) return hidden_states class FlaxBigBirdForQuestionAnsweringModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 add_pooling_layer: bool = False gradient_checkpointing: bool = False def setup(self): self.config.num_labels = 2 self.bert = FlaxBigBirdModule(self.config, dtype=self.dtype, add_pooling_layer=self.add_pooling_layer, gradient_checkpointing=self.gradient_checkpointing) self.qa_classifier = FlaxBigBirdForQuestionAnsweringHead(self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, logits_mask=None, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] pooled_output = outputs[1] if self.add_pooling_layer else None logits = self.qa_classifier(hidden_states, deterministic=deterministic) if logits_mask is not None: logits = logits - logits_mask * 1000000.0 (start_logits, end_logits) = logits.split(self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxBigBirdForQuestionAnsweringModelOutput(start_logits=start_logits, end_logits=end_logits, pooled_output=pooled_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', BIG_BIRD_START_DOCSTRING) class FlaxBigBirdForQuestionAnswering(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdForQuestionAnsweringModule @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, question_lengths=None, params: dict=None, dropout_rng: Optional[jax.random.PRNGKey]=None, indices_rng: Optional[jax.random.PRNGKey]=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) if question_lengths is None and input_ids is not None: question_lengths = jnp.argmax((input_ids == self.config.sep_token_id).astype('i4'), axis=-1) + 1 question_lengths = jnp.expand_dims(question_lengths, axis=1) seqlen = input_ids.shape[1] logits_mask = None if question_lengths is not None: logits_mask = self.prepare_question_mask(question_lengths, seqlen) if token_type_ids is None: token_type_ids = (~logits_mask).astype('i4') logits_mask = jnp.expand_dims(logits_mask, axis=2) logits_mask = logits_mask.at[:, 0].set(False) if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng if indices_rng is not None: rngs['indices'] = indices_rng return self.module.apply({'params': params or self.params}, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), token_type_ids, jnp.array(position_ids, dtype='i4'), jnp.array(head_mask, dtype='i4'), logits_mask, not train, output_attentions, output_hidden_states, return_dict, rngs=rngs) @staticmethod def prepare_question_mask(q_lengths, maxlen: int): mask = jnp.arange(0, maxlen) mask = jnp.expand_dims(mask, axis=0) < q_lengths return mask append_call_sample_docstring(FlaxBigBirdForQuestionAnswering, _CHECKPOINT_FOR_DOC, FlaxBigBirdForQuestionAnsweringModelOutput, _CONFIG_FOR_DOC) class FlaxBigBirdForCausalLMModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule(config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.cls = FlaxBigBirdOnlyMLMHead(config=self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, position_ids, token_type_ids: Optional[jnp.ndarray]=None, head_mask: Optional[jnp.ndarray]=None, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.bert.variables['params']['embeddings']['word_embeddings']['embedding'] else: shared_embedding = None logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) @add_start_docstrings('\n BigBird Model with a language 
modeling head on top (a linear layer on top of the hidden-states output) e.g. for\n autoregressive tasks.\n ', BIG_BIRD_START_DOCSTRING) class FlaxBigBirdForCausalLM(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array]=None): (batch_size, seq_length) = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'attention_mask': extended_attention_mask, 'position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['position_ids'] = model_kwargs['position_ids'][:, -1:] + 1 return model_kwargs append_call_sample_docstring(FlaxBigBirdForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC) # File: transformers-main/src/transformers/models/big_bird/tokenization_big_bird.py """""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'} class BigBirdTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] prefix_tokens: List[int] = [] def __init__(self, vocab_file, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', sep_token='[SEP]', mask_token='[MASK]', cls_token='[CLS]', sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None: bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None return state def 
__setstate__(self, d): self.__dict__ = d if not hasattr(self, 'sp_model_kwargs'): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): current_sub_tokens = [] out_string = '' prev_is_special = False for token in tokens: if token in self.all_special_tokens: if not prev_is_special: out_string += ' ' out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def _decode(self, token_ids: List[int], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=None, spaces_between_special_tokens: bool=True, **kwargs) -> str: self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False) filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) sub_texts = [] current_sub_text = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) if spaces_between_special_tokens: text = re.sub(' (\\[(MASK|SEP)\\])', '\\1', ' '.join(sub_texts)) else: text = ''.join(sub_texts) clean_up_tokenization_spaces = clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + 
[1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] # File: transformers-main/src/transformers/models/big_bird/tokenization_big_bird_fast.py """""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: BigBirdTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} SPIECE_UNDERLINE = '▁' class BigBirdTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = BigBirdTokenizer model_input_names = ['input_ids', 'attention_mask'] prefix_tokens: List[int] = [] def __init__(self, vocab_file=None, tokenizer_file=None, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', sep_token='[SEP]', mask_token='[MASK]', cls_token='[CLS]', **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs) self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: if token_ids_1 is not None: raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.') return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0] if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return 
len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.') if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) # File: transformers-main/src/transformers/models/bigbird_pegasus/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_bigbird_pegasus': ['BigBirdPegasusConfig', 'BigBirdPegasusOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_bigbird_pegasus'] = ['BigBirdPegasusForCausalLM', 'BigBirdPegasusForConditionalGeneration', 'BigBirdPegasusForQuestionAnswering', 'BigBirdPegasusForSequenceClassification', 'BigBirdPegasusModel', 'BigBirdPegasusPreTrainedModel'] if TYPE_CHECKING: from .configuration_bigbird_pegasus import BigBirdPegasusConfig, BigBirdPegasusOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py """""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... 
import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging logger = logging.get_logger(__name__) class BigBirdPegasusConfig(PretrainedConfig): model_type = 'bigbird_pegasus' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model', 'attention_probs_dropout_prob': 'attention_dropout'} def __init__(self, vocab_size=96103, max_position_embeddings=4096, encoder_layers=16, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=16, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu_new', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, classifier_dropout=0.0, scale_embedding=True, pad_token_id=0, bos_token_id=2, eos_token_id=1, attention_type='block_sparse', block_size=64, num_random_blocks=3, use_bias=False, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding self.attention_type = attention_type self.block_size = block_size self.num_random_blocks = num_random_blocks self.use_bias = use_bias super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs) class BigBirdPegasusOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ['default', 'seq2seq-lm']: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: common_inputs['decoder_input_ids'] = {0: 'batch'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') elif self.task == 'causal-lm': common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: (num_encoder_layers, _) = self.num_layers for i in range(num_encoder_layers): common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'} common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'} else: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 
'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})]) return common_inputs @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ['default', 'seq2seq-lm']: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: (num_encoder_layers, _) = self.num_layers for i in range(num_encoder_layers): common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'} common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework) decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair, framework) decoder_inputs = {f'decoder_{name}': tensor for (name, tensor) in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, encoder_seq_length) = common_inputs['input_ids'].shape decoder_seq_length = common_inputs['decoder_input_ids'].shape[1] (num_encoder_attention_heads, num_decoder_attention_heads) = self.num_attention_heads encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads) decoder_past_length = decoder_seq_length + 3 decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads) common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1) common_inputs['past_key_values'] = [] (num_encoder_layers, num_decoder_layers) = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(min_num_layers): common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape))) shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(min_num_layers, max_num_layers): common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, seqlen) = common_inputs['input_ids'].shape 
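# --- Illustrative note (not part of the original source file): the dummy-input helpers above are normally
# reached through `generate_dummy_inputs`, which dispatches on the configured task; the causal-lm branch
# requires PyTorch because it fabricates `past_key_values` tensors. A minimal sketch, assuming the checkpoint
# named by _CHECKPOINT_FOR_DOC in the modeling file below is available:
# >>> from transformers import AutoTokenizer, BigBirdPegasusConfig, TensorType
# >>> from transformers.models.bigbird_pegasus.configuration_bigbird_pegasus import BigBirdPegasusOnnxConfig
# >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
# >>> onnx_config = BigBirdPegasusOnnxConfig(BigBirdPegasusConfig(), task="causal-lm", use_past=True)
# >>> dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
# >>> sorted(dummy)  # contains input_ids, attention_mask and past_key_values entries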
past_key_values_length = seqlen + 2 (num_encoder_layers, _) = self.num_layers (num_encoder_attention_heads, _) = self.num_attention_heads past_shape = (batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads) mask_dtype = common_inputs['attention_mask'].dtype common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1) common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)] return common_inputs def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0) token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add) dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) return common_inputs def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: if self.task in ['default', 'seq2seq-lm']: common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) elif self.task == 'causal-lm': common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) else: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) return common_inputs def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ['default', 'seq2seq-lm']: flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t) # File: transformers-main/src/transformers/models/bigbird_pegasus/convert_bigbird_pegasus_tf_to_pytorch.py import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration INIT_COMMON = [('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model')] END_COMMON = [('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1')] DECODER_PATTERNS = INIT_COMMON + [('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding')] + END_COMMON REMAINING_PATTERNS = INIT_COMMON + [('embeddings.word_embeddings', 'shared.weight'), 
('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding')] + END_COMMON KEYS_TO_IGNORE = ['encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias'] def rename_state_dict_key(k, patterns): for (tf_name, hf_name) in patterns: k = k.replace(tf_name, hf_name) return k def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration: cfg = BigBirdPegasusConfig(**config_update) torch_model = BigBirdPegasusForConditionalGeneration(cfg) state_dict = torch_model.state_dict() mapping = {} decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder')} remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder')} for (k, v) in tqdm(decoder_weights.items(), 'tf -> hf conversion'): conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE] if any(conditions): continue patterns = DECODER_PATTERNS new_k = rename_state_dict_key(k, patterns) if new_k not in state_dict: raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})') if any((True if i in k else False for i in ['dense', 'query', 'key', 'value'])): v = v.T mapping[new_k] = torch.from_numpy(v) assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' for (k, v) in tqdm(remaining_weights.items(), 'tf -> hf conversion'): conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE] if any(conditions): continue patterns = REMAINING_PATTERNS new_k = rename_state_dict_key(k, patterns) if new_k not in state_dict and k != 'pegasus/embeddings/position_embeddings': raise ValueError(f'could not find new key {new_k} in state dict. 
(converted from {k})') if any((True if i in k else False for i in ['dense', 'query', 'key', 'value'])): v = v.T mapping[new_k] = torch.from_numpy(v) if k != 'pegasus/embeddings/position_embeddings': assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight'] mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight') (missing, extra) = torch_model.load_state_dict(mapping, strict=False) unexpected_missing = [k for k in missing if k not in ['final_logits_bias', 'model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight', 'lm_head.weight']] assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}' assert extra == [], f'no matches found for the following tf keys {extra}' return torch_model def get_tf_weights_as_numpy(path) -> Dict: init_vars = tf.train.list_variables(path) tf_weights = {} ignore_name = ['global_step'] for (name, shape) in tqdm(init_vars, desc='converting tf checkpoint to dict'): skip_key = any((pat in name for pat in ignore_name)) if skip_key: continue array = tf.train.load_variable(path, name) tf_weights[name] = array return tf_weights def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict): tf_weights = get_tf_weights_as_numpy(ckpt_path) torch_model = convert_bigbird_pegasus(tf_weights, config_update) torch_model.save_pretrained(save_dir) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') args = parser.parse_args() config_update = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) # File: transformers-main/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py """""" import copy import math from typing import List, Optional, Tuple, Union import numpy as np import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_bigbird_pegasus import BigBirdPegasusConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google/bigbird-pegasus-large-arxiv' _CONFIG_FOR_DOC = 'BigBirdPegasusConfig' _EXPECTED_OUTPUT_SHAPE = [1, 7, 1024] def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError('self.model.config.pad_token_id has to be defined.') shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class 
BigBirdPegasusLearnedPositionalEmbedding(nn.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int): super().__init__(num_embeddings, embedding_dim) def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0): (bsz, seq_len) = input_ids_shape[:2] positions = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device) return super().forward(positions) class BigBirdPegasusScaledWordEmbedding(nn.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale class BigBirdPegasusSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = 
context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BigBirdPegasusBlockSparseAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.max_seqlen = config.max_position_embeddings self.seed = seed if config.hidden_size % config.num_attention_heads != 0: raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.') self.num_attention_heads = config.num_attention_heads self.num_random_blocks = config.num_random_blocks self.block_size = config.block_size self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions=None): (batch_size, seqlen, _) = hidden_states.size() to_seq_length = from_seq_length = seqlen from_block_size = to_block_size = self.block_size if from_seq_length % from_block_size != 0: raise ValueError('Query sided sequence length must be multiple of block size') if to_seq_length % to_block_size != 0: raise ValueError('Key/Value sided sequence length must be multiple of block size') query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) (context_layer, attention_probs) = self.bigbird_block_sparse_attention(query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, self.num_attention_heads, self.num_random_blocks, self.attention_head_size, from_block_size, to_block_size, batch_size, from_seq_length, to_seq_length, seed=self.seed, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=output_attentions) context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs @staticmethod def torch_bmm_nd(inp_1, inp_2, ndim=None): return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(inp_1.shape[:ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])) @staticmethod def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None): return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)).view(inp_1.shape[:ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2])) def bigbird_block_sparse_attention(self, query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, n_heads, n_rand_blocks, attention_head_size, from_block_size, to_block_size, batch_size, from_seq_len, to_seq_len, seed, plan_from_length, 
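# A hedged shape check for `torch_bmm_nd` / `torch_bmm_nd_transpose` above: they
# flatten all leading dimensions so `torch.bmm` can run on >3-D tensors, then
# restore the leading shape. `_demo_bmm_nd` is an illustrative helper name; call it
# only after the class definition is complete.
import torch

def _demo_bmm_nd():
    b, h, m, k, n = 2, 3, 4, 5, 6
    x = torch.randn(b, h, m, k)
    y = torch.randn(b, h, k, n)
    out = BigBirdPegasusBlockSparseAttention.torch_bmm_nd(x, y, ndim=4)
    assert out.shape == (b, h, m, n)
    assert torch.allclose(out, torch.matmul(x, y), atol=1e-5)
    z = torch.randn(b, h, n, k)  # last dim matches x's last dim
    out_t = BigBirdPegasusBlockSparseAttention.torch_bmm_nd_transpose(x, z, ndim=4)
    assert torch.allclose(out_t, torch.matmul(x, z.transpose(-1, -2)), atol=1e-5)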
plan_num_rand_blocks, output_attentions): if from_seq_len // from_block_size != to_seq_len // to_block_size: raise ValueError('Error the number of blocks needs to be same!') rsqrt_d = 1 / math.sqrt(attention_head_size) bsz = batch_size attn_mask_penalty = -10000.0 np.random.seed(seed) if from_seq_len in [1024, 3072, 4096]: rand_attn = [self._bigbird_block_rand_mask(self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024)[:from_seq_len // from_block_size - 2] for _ in range(n_heads)] else: if plan_from_length is None: (plan_from_length, plan_num_rand_blocks) = self._get_rand_attn_plan(from_seq_len, from_block_size, n_rand_blocks) rand_attn = self._bigbird_block_rand_mask_with_head(from_seq_length=from_seq_len, to_seq_length=to_seq_len, from_block_size=from_block_size, to_block_size=to_block_size, num_heads=n_heads, plan_from_length=plan_from_length, plan_num_rand_blocks=plan_num_rand_blocks) rand_attn = np.stack(rand_attn, axis=0) rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long) rand_attn.unsqueeze_(0) rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0) rand_mask = self._create_rand_mask_from_inputs(from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size) blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1) blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn) gathered_key = gathered_key.view(bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1) gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn) gathered_value = gathered_value.view(bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1) first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4) first_product = first_product * rsqrt_d first_product += (1.0 - to_mask) * attn_mask_penalty first_attn_weights = nn.functional.softmax(first_product, dim=-1) first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4) first_context_layer.unsqueeze_(2) second_key_mat = torch.cat([blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0]], dim=2) second_value_mat = torch.cat([blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0]], dim=2) second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4) second_seq_pad = torch.cat([to_mask[:, :, :, :3 * to_block_size], to_mask[:, :, :, -to_block_size:], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size])], dim=3) second_rand_pad = torch.cat([rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, 0]], dim=3) second_product = second_product * rsqrt_d second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty second_attn_weights = nn.functional.softmax(second_product, dim=-1) second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4) second_context_layer.unsqueeze_(2) exp_blocked_key_matrix = torch.cat([blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], 
blocked_key_matrix[:, :, 3:-1]], dim=3) exp_blocked_value_matrix = torch.cat([blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]], dim=3) middle_query_matrix = blocked_query_matrix[:, :, 2:-2] inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5) inner_band_product = inner_band_product * rsqrt_d rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5) rand_band_product = rand_band_product * rsqrt_d first_band_product = torch.einsum('bhlqd,bhkd->bhlqk', middle_query_matrix, blocked_key_matrix[:, :, 0]) first_band_product = first_band_product * rsqrt_d last_band_product = torch.einsum('bhlqd,bhkd->bhlqk', middle_query_matrix, blocked_key_matrix[:, :, -1]) last_band_product = last_band_product * rsqrt_d inner_band_product += (1.0 - band_mask) * attn_mask_penalty first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty band_product = torch.cat([first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1) attn_weights = nn.functional.softmax(band_product, dim=-1) context_layer = self.torch_bmm_nd(attn_weights[:, :, :, :, to_block_size:4 * to_block_size], exp_blocked_value_matrix, ndim=5) context_layer += self.torch_bmm_nd(attn_weights[:, :, :, :, 4 * to_block_size:-to_block_size], gathered_value[:, :, 1:-1], ndim=5) context_layer += torch.einsum('bhlqk,bhkd->bhlqd', attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]) context_layer += torch.einsum('bhlqk,bhkd->bhlqd', attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]) second_last_key_mat = torch.cat([blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1]], dim=2) second_last_value_mat = torch.cat([blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1]], dim=2) second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4) second_last_seq_pad = torch.cat([to_mask[:, :, :, :to_block_size], to_mask[:, :, :, -3 * to_block_size:], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size])], dim=3) second_last_rand_pad = torch.cat([rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, -1]], dim=3) second_last_product = second_last_product * rsqrt_d second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty second_last_attn_weights = nn.functional.softmax(second_last_product, dim=-1) second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4) second_last_context_layer.unsqueeze_(2) last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4) last_product = last_product * rsqrt_d last_product += (1.0 - to_mask) * attn_mask_penalty last_attn_weights = nn.functional.softmax(last_product, dim=-1) last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4) last_context_layer.unsqueeze_(2) context_layer = torch.cat([first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer], dim=2) context_layer 
= context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask context_layer = torch.transpose(context_layer, 1, 2) if output_attentions: attention_probs = torch.zeros(bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device) attention_probs[:, :, :from_block_size, :] = first_attn_weights attention_probs[:, :, from_block_size:2 * from_block_size, :3 * to_block_size] = second_attn_weights[:, :, :, :3 * to_block_size] attention_probs[:, :, from_block_size:2 * from_block_size, -to_block_size:] = second_attn_weights[:, :, :, 3 * to_block_size:4 * to_block_size] for (p1, i1, w1) in zip(range(bsz), rand_attn, second_attn_weights): for (p2, i2, w2) in zip(range(n_heads), i1, w1): attn_probs_view = attention_probs.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size) right_slice = w2[:, 4 * to_block_size:] attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(from_block_size, n_rand_blocks, to_block_size) for q_idx in range(from_seq_len // from_block_size - 4): attn_probs_view = attention_probs.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size)[:, :, 2:-2, :, 1:-1, :] right_slice = attn_weights[:, :, q_idx, :, to_block_size:4 * to_block_size] attn_probs_view[:, :, q_idx, :, q_idx:q_idx + 3, :] = right_slice.view(bsz, n_heads, from_block_size, 3, to_block_size) attention_probs[:, :, 2 * from_block_size:-2 * from_block_size, :to_block_size] = attn_weights[:, :, :, :, :to_block_size].view(bsz, n_heads, -1, to_block_size) attention_probs[:, :, 2 * from_block_size:-2 * from_block_size, -to_block_size:] = attn_weights[:, :, :, :, -to_block_size:].view(bsz, n_heads, -1, to_block_size) for (p1, i1, w1) in zip(range(bsz), rand_attn, attn_weights): for (p2, i2, w2) in zip(range(n_heads), i1, w1): for q_idx in range(1, len(i2) - 1): attn_probs_view = attention_probs.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size) right_slice = w2[q_idx - 1, :, 4 * to_block_size:-to_block_size] attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(from_block_size, n_rand_blocks, to_block_size) attention_probs[:, :, -2 * from_block_size:-from_block_size, :to_block_size] = second_last_attn_weights[:, :, :, :to_block_size] attention_probs[:, :, -2 * from_block_size:-from_block_size, -3 * to_block_size:] = second_last_attn_weights[:, :, :, to_block_size:4 * to_block_size] for (p1, i1, w1) in zip(range(bsz), rand_attn, second_last_attn_weights): for (p2, i2, w2) in zip(range(n_heads), i1, w1): attn_probs_view = attention_probs.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size) right_slice = w2[:, 4 * to_block_size:] attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(from_block_size, n_rand_blocks, to_block_size) attention_probs[:, :, -from_block_size:, :] = last_attn_weights else: attention_probs = None return (context_layer, attention_probs) @staticmethod def torch_gather_b2(params, indices): if params.shape[:2] != indices.shape[:2]: raise ValueError(f'Make sure that the first two dimensions of params and indices are identical, but they are params: {params.shape[:2]} vs. 
indices: {indices.shape[:2]}') num_indices_to_gather = indices.shape[-2] * indices.shape[-1] num_indices_to_pick_from = params.shape[2] shift = torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device) indices_shift = torch.div(shift, num_indices_to_gather, rounding_mode='floor') * num_indices_to_pick_from flattened_indices = indices.view(-1) + indices_shift flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1]) out_flattened = flattened_params.index_select(0, flattened_indices) out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:]) return out @staticmethod def _create_rand_mask_from_inputs(from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size): num_windows = from_seq_length // from_block_size - 2 rand_mask = torch.stack([p1[i1.flatten()] for (p1, i1) in zip(to_blocked_mask, rand_attn)]) rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size) rand_mask = torch.einsum('blq,bhlk->bhlqk', from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): plan_from_length = [] plan_num_rand_blocks = [] if 2 * num_rand_blocks + 5 < from_seq_length // from_block_size: plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif num_rand_blocks + 5 < from_seq_length // from_block_size: plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - num_rand_blocks // 2) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return (plan_from_length, plan_num_rand_blocks) def _bigbird_block_rand_mask(self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1): if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError('Error the number of blocks needs to be same!') rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32) if not self.training: return rand_attn middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32) last = to_seq_length // to_block_size - 1 if last_idx > 2 * to_block_size: last = last_idx // to_block_size - 1 r = num_rand_blocks for i in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r] elif i == 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r] elif i == from_seq_length // from_block_size - 3: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] elif i == from_seq_length // from_block_size - 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] elif start > last: start = last rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] elif end + 1 == last: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] else: rand_attn[i - 1, :] = np.random.permutation(np.concatenate((middle_seq[:start], middle_seq[end + 1:last])))[:r] return rand_attn def _bigbird_block_rand_mask_with_head(self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, 
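# A hedged sketch of `torch_gather_b2` above: for every (batch, head) pair it picks
# the key/value blocks selected by the random-attention indices, i.e.
# out[b, h, i * r + j] == params[b, h, indices[b, h, i, j]]. `_demo_gather_b2` is an
# illustrative helper name; the sizes are arbitrary.
import torch

def _demo_gather_b2():
    bsz, heads, num_blocks, block_size, head_dim, rows, r = 2, 2, 6, 3, 4, 4, 2
    params = torch.randn(bsz, heads, num_blocks, block_size, head_dim)
    indices = torch.randint(0, num_blocks, (bsz, heads, rows, r))
    out = BigBirdPegasusBlockSparseAttention.torch_gather_b2(params, indices)
    assert out.shape == (bsz, heads, rows * r, block_size, head_dim)
    for b in range(bsz):
        for h in range(heads):
            for i in range(rows):
                for j in range(r):
                    assert torch.equal(out[b, h, i * r + j], params[b, h, indices[b, h, i, j]])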
plan_num_rand_blocks, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1): if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError('Error the number of blocks needs to be same!') if from_seq_length not in plan_from_length: raise ValueError('Error from sequence length not in plan!') num_blocks = from_seq_length // from_block_size plan_block_length = np.array(plan_from_length) // from_block_size max_plan_idx = plan_from_length.index(from_seq_length) rand_attn = [np.zeros((num_blocks, np.sum(plan_num_rand_blocks[:max_plan_idx + 1])), dtype=np.int32) for i in range(num_heads)] if not self.training: for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top:num_blocks - global_block_bottom, :] return rand_attn for plan_idx in range(max_plan_idx + 1): rnd_r_cnt = 0 if plan_idx > 0: if plan_num_rand_blocks[plan_idx] > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) curr_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx + 1])) for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right) for pl_id in range(plan_idx): if plan_num_rand_blocks[pl_id] == 0: continue for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]): rnd_r_cnt = 0 to_start_block_id = 0 if pl_id > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id])) to_start_block_id = plan_block_length[pl_id - 1] curr_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id + 1])) for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right) if plan_num_rand_blocks[plan_idx] == 0: continue curr_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx + 1])) from_start_block_id = global_block_top to_start_block_id = 0 if plan_idx > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) from_start_block_id = plan_block_length[plan_idx - 1] to_start_block_id = plan_block_length[plan_idx - 1] for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right) for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top:num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention(block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1): to_block_list = 
np.arange(to_start_block_id, to_end_block_id, dtype=np.int32) perm_block = np.random.permutation(to_block_list) illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) if block_id == 1: illegal_blocks.append(to_end_block_id - 2) if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blocks = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blocks.append(perm_block[i]) if len(selected_random_blocks) == num_rand_blocks: break return np.array(selected_random_blocks, dtype=np.int32) class BigBirdPegasusEncoderAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.config = config self.seed = seed self.attention_type = config.attention_type if self.attention_type == 'original_full': self.self = BigBirdPegasusSelfAttention(config) elif self.attention_type == 'block_sparse': self.self = BigBirdPegasusBlockSparseAttention(config, seed) else: raise ValueError(f'attention_type can either be original_full or block_sparse, but is {self.config.attention_type}') self.output = nn.Linear(config.hidden_size, config.hidden_size, bias=config.use_bias) def set_attention_type(self, value: str): if value not in ['original_full', 'block_sparse']: raise ValueError(f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}") if value == self.attention_type: return self.attention_type = value if value == 'original_full': attn_weights = BigBirdPegasusSelfAttention(self.config) else: attn_weights = BigBirdPegasusBlockSparseAttention(self.config, self.seed) attn_weights.query = self.self.query attn_weights.value = self.self.value attn_weights.key = self.self.key self.self = attn_weights self.attention_type = value if not self.training: self.self.eval() def forward(self, hidden_states, attention_mask=None, head_mask=None, past_key_value=None, output_attentions=False, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None): head_mask = head_mask.reshape(1, -1, 1, 1) if head_mask is not None else None if self.config.attention_type == 'original_full': self_outputs = self.self(hidden_states, attention_mask, head_mask, past_key_value=past_key_value, output_attentions=output_attentions) else: self_outputs = self.self(hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions) attention_output = self.output(self_outputs[0]) outputs = (attention_output,) + self_outputs[1:] return outputs class BigBirdPegasusDecoderAttention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[BigBirdPegasusConfig]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj =
nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = 
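# The decoder-side attention above is ordinary multi-head scaled dot-product
# attention: project to Q/K/V, split into heads, scale queries by head_dim ** -0.5,
# softmax(QK^T + mask), weight V, then merge heads. A hedged restatement of that
# arithmetic with identity "projections" (no dropout, masks, or caching):
import torch

def _demo_multi_head_attention(hidden_states: torch.Tensor, num_heads: int) -> torch.Tensor:
    bsz, tgt_len, embed_dim = hidden_states.shape
    assert embed_dim % num_heads == 0
    head_dim = embed_dim // num_heads

    def split_heads(x):
        return x.view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)

    q = split_heads(hidden_states) * head_dim ** -0.5
    k = split_heads(hidden_states)
    v = split_heads(hidden_states)
    attn_weights = torch.softmax(q @ k.transpose(-1, -2), dim=-1)
    return (attn_weights @ v).transpose(1, 2).reshape(bsz, tgt_len, embed_dim)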
attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) class BigBirdPegasusEncoderLayer(nn.Module): def __init__(self, config: BigBirdPegasusConfig, seed=None): super().__init__() self.attention_type = config.attention_type self.embed_dim = config.d_model self.self_attn = BigBirdPegasusEncoderAttention(config, seed=seed) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions: bool=False): residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) self_attention_outputs = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=from_blocked_mask, to_blocked_mask=to_blocked_mask) hidden_states = self_attention_outputs[0] hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (self_attention_outputs[1],) return outputs def set_attention_type(self, value: str): if value not in ['original_full', 'block_sparse']: raise ValueError(f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}") if value == self.attention_type: return self.attention_type = value self.self_attn.set_attention_type(value) class BigBirdPegasusDecoderLayer(nn.Module): def __init__(self, config: BigBirdPegasusConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BigBirdPegasusDecoderAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=config.use_bias) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = BigBirdPegasusDecoderAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=config.use_bias) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: 
Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> torch.Tensor: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None (hidden_states, cross_attn_weights, cross_attn_present_key_value) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class BigBirdPegasusClassificationHead(nn.Module): def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class BigBirdPegasusPreTrainedModel(PreTrainedModel): config_class = BigBirdPegasusConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['BigBirdPegasusEncoderLayer', 'BigBirdPegasusDecoderLayer'] _skip_keys_device_placement = 'past_key_values' _supports_param_buffer_assignment = False def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: 
module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = {'attention_mask': input_ids.ne(pad_token), 'input_ids': input_ids} return dummy_inputs BIGBIRD_PEGASUS_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`BigBirdPegasusConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BIGBIRD_PEGASUS_GENERATION_EXAMPLE = '\n Summarization example:\n\n ```python\n >>> from transformers import AutoTokenizer, BigBirdPegasusForConditionalGeneration\n\n >>> model = BigBirdPegasusForConditionalGeneration.from_pretrained("google/bigbird-pegasus-large-arxiv")\n >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")\n\n >>> ARTICLE_TO_SUMMARIZE = (\n ... "The dominant sequence transduction models are based on complex recurrent or convolutional neural "\n ... "networks in an encoder-decoder configuration. The best performing models also connect the encoder "\n ... "and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, "\n ... "based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. "\n ... "Experiments on two machine translation tasks show these models to be superior in quality "\n ... "while being more parallelizable and requiring significantly less time to train."\n ... )\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=4096, return_tensors="pt", truncation=True)\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=15)\n >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \'dominant sequence models are based on recurrent or convolutional neural networks .\'\n ```\n' BIGBIRD_PEGASUS_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Provide for translation and summarization training. 
By default, the model will create this tensor by\n shifting the `input_ids` to the right, following the paper.\n decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n If you want to change padding behavior, you should read\n [`modeling_bigbird_pegasus._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in\n [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.\n\n decoder_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded\n representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be\n input (see `past_key_values`). This is useful if you want more control over how to convert\n `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value\n of `inputs_embeds`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" BIGBIRD_PEGASUS_STANDALONE_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`ProphetNetTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class BigBirdPegasusEncoder(BigBirdPegasusPreTrainedModel): def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.attention_type = config.attention_type self.block_size = config.block_size self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = BigBirdPegasusScaledWordEmbedding(config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = BigBirdPegasusLearnedPositionalEmbedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([BigBirdPegasusEncoderLayer(config, seed=i) for i in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, 
input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if attention_mask is None: attention_mask = torch.ones(input_shape, device=hidden_states.device) attention_mask = attention_mask.long() max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size if self.attention_type == 'block_sparse' and input_shape[1] <= max_tokens_to_attend: sequence_length = input_shape[1] logger.warning(f"Attention type 'block_sparse' is not possible if sequence_length: {sequence_length} <= num global tokens: 2 * config.block_size + min. num sliding tokens: 3 * config.block_size + config.num_random_blocks * config.block_size + additional buffer: config.num_random_blocks * config.block_size = {max_tokens_to_attend} with config.block_size = {self.config.block_size}, config.num_random_blocks = {self.config.num_random_blocks}. Changing attention type to 'original_full'...") self.set_attention_type('original_full') if self.attention_type == 'block_sparse': (padding_len, hidden_states, attention_mask) = self._pad_to_block_size(hidden_states, attention_mask) else: padding_len = 0 if self.attention_type == 'original_full': attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) blocked_encoder_mask = band_mask = from_mask = to_mask = None elif self.attention_type == 'block_sparse': (blocked_encoder_mask, band_mask, from_mask, to_mask) = self.create_masks_for_block_sparse_attn(attention_mask, self.block_size) attention_mask = None else: raise ValueError(f'attention_type can either be original_full or block_sparse, but is {self.attention_type}') encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, band_mask, from_mask, to_mask, blocked_encoder_mask, blocked_encoder_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=blocked_encoder_mask, to_blocked_mask=blocked_encoder_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layernorm_embedding(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if padding_len > 0: hidden_states = hidden_states[:, :-padding_len] if not return_dict: return tuple((v for v in [hidden_states, encoder_states, 
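# A hedged note on the fallback above: block-sparse attention is only used when the
# (padded) sequence length exceeds (5 + 2 * num_random_blocks) * block_size; shorter
# inputs switch to 'original_full'. With block_size=64 and num_random_blocks=3
# (assumed defaults -- check your config), the threshold is (5 + 2 * 3) * 64 = 704 tokens.
def _demo_block_sparse_threshold(block_size: int = 64, num_random_blocks: int = 3) -> int:
    return (5 + 2 * num_random_blocks) * block_size  # 704 for the assumed defaults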
all_attentions] if v is not None)) self.encoder_o = hidden_states return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) def set_attention_type(self, value: str): if value not in ['original_full', 'block_sparse']: raise ValueError(f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}") if value == self.attention_type: return self.attention_type = value for layer in self.layers: layer.set_attention_type(value) @staticmethod def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int): (batch_size, seq_length) = attention_mask.size() if seq_length % block_size != 0: raise ValueError(f'Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}.') def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask): exp_blocked_to_pad = torch.cat([to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2) band_mask = torch.einsum('blq,blk->blqk', from_blocked_mask[:, 2:-2], exp_blocked_to_pad) band_mask.unsqueeze_(1) return band_mask blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) from_mask = attention_mask.view(batch_size, 1, seq_length, 1) to_mask = attention_mask.view(batch_size, 1, 1, seq_length) return (blocked_encoder_mask, band_mask, from_mask, to_mask) def _pad_to_block_size(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor): block_size = self.config.block_size (batch_size, seq_len) = hidden_states.shape[:2] padding_len = (block_size - seq_len % block_size) % block_size if padding_len > 0: logger.warning_once(f'Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of `config.block_size`: {block_size}') pad_id = self.config.pad_token_id device = hidden_states.device input_ids_padding = torch.ones((batch_size, padding_len), dtype=torch.long, device=device) * pad_id inputs_embeds_padding = self.embed_tokens(input_ids_padding) hidden_states = torch.cat([hidden_states, inputs_embeds_padding], dim=-2) attention_mask = nn.functional.pad(attention_mask, (0, padding_len), value=0) return (padding_len, hidden_states, attention_mask) class BigBirdPegasusDecoder(BigBirdPegasusPreTrainedModel): def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = BigBirdPegasusScaledWordEmbedding(config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = BigBirdPegasusLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model) self.layers = nn.ModuleList([BigBirdPegasusDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, 
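# A hedged shape check for `create_masks_for_block_sparse_attn` above, which expands a
# (batch, seq_len) attention mask into the four masks used by the block-sparse kernel.
# seq_len must already be a multiple of block_size; the sizes below are illustrative.
import torch

def _demo_block_sparse_masks(batch_size: int = 2, seq_len: int = 448, block_size: int = 64):
    attention_mask = torch.ones(batch_size, seq_len)
    blocked, band, from_mask, to_mask = BigBirdPegasusEncoder.create_masks_for_block_sparse_attn(
        attention_mask, block_size
    )
    n_blocks = seq_len // block_size
    assert blocked.shape == (batch_size, n_blocks, block_size)
    assert band.shape == (batch_size, 1, n_blocks - 4, block_size, 3 * block_size)
    assert from_mask.shape == (batch_size, 1, seq_len, 1)
    assert to_mask.shape == (batch_size, 1, 1, seq_len)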
encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) positions = self.embed_positions(input_shape, past_key_values_length) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None next_decoder_cache = () if use_cache else None for (attn_mask, mask_name) in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache) else: layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layernorm_embedding(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) @add_start_docstrings('The bare BigBirdPegasus Model outputting raw hidden-states without any specific head on top.', BIGBIRD_PEGASUS_START_DOCSTRING) class BigBirdPegasusModel(BigBirdPegasusPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: BigBirdPegasusConfig): super().__init__(config) (padding_idx, vocab_size) = (config.pad_token_id, config.vocab_size) embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.shared = BigBirdPegasusScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale) self.encoder = BigBirdPegasusEncoder(config, self.shared) self.decoder = BigBirdPegasusDecoder(config, self.shared) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, 
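# A hedged usage sketch for the bare model: this assumes network access to download
# the 'google/bigbird-pegasus-large-arxiv' checkpoint named in `_CHECKPOINT_FOR_DOC`
# (d_model is 1024 there, matching `_EXPECTED_OUTPUT_SHAPE`).
def _demo_bigbird_pegasus_model():
    import torch
    from transformers import AutoTokenizer, BigBirdPegasusModel

    tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
    model = BigBirdPegasusModel.from_pretrained("google/bigbird-pegasus-large-arxiv")
    inputs = tokenizer("BigBird-Pegasus handles long documents.", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # (batch_size, sequence_length, d_model); such a short input makes the encoder
    # fall back from block-sparse to full attention, as warned above.
    print(outputs.last_hidden_state.shape)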
self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Seq2SeqModelOutput]: if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError('If no `decoder_input_ids` or `decoder_inputs_embeds` are passed, `input_ids` cannot be `None`. Please pass either `input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`.') decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id, self.config.decoder_start_token_id) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) @add_start_docstrings('The BigBirdPegasus Model with a language modeling head. 
Can be used for summarization.', BIGBIRD_PEGASUS_START_DOCSTRING) class BigBirdPegasusForConditionalGeneration(BigBirdPegasusPreTrainedModel): base_model_prefix = 'model' _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight', 'lm_head.weight'] _keys_to_ignore_on_load_missing = ['final_logits_bias'] def __init__(self, config: BigBirdPegasusConfig): super().__init__(config) self.model = BigBirdPegasusModel(config) self.register_buffer('final_logits_bias', torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer('final_logits_bias', new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BIGBIRD_PEGASUS_GENERATION_EXAMPLE) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Seq2SeqLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.') use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) lm_logits = 
self.lm_head(outputs[0]) lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device) masked_lm_loss = None if labels is not None: labels = labels.to(lm_logits.device) loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] return {'input_ids': None, 'encoder_outputs': encoder_outputs, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])) + layer_past[2:],) return reordered_past @add_start_docstrings('\n BigBirdPegasus model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g.\n for GLUE tasks.\n ', BIGBIRD_PEGASUS_START_DOCSTRING) class BigBirdPegasusForSequenceClassification(BigBirdPegasusPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config: BigBirdPegasusConfig, **kwargs): super().__init__(config, **kwargs) self.model = BigBirdPegasusModel(config) self.classification_head = BigBirdPegasusClassificationHead(config.d_model, config.d_model, config.num_labels, config.classifier_dropout) self.post_init() @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: 
Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError(f'Passing input embeddings is currently not supported for {self.__class__.__name__}') outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError('All examples must have the same number of tokens.') sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[:, -1, :] logits = self.classification_head(sentence_representation) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = 'regression' elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Seq2SeqSequenceClassifierOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) @add_start_docstrings('\n BigBirdPegasus Model with a span classification head on top for extractive question-answering tasks like SQuAD (a\n linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', BIGBIRD_PEGASUS_START_DOCSTRING) class BigBirdPegasusForQuestionAnswering(BigBirdPegasusPreTrainedModel): _tied_weights_keys = ['encoder.embed_tokens.weight', 'decoder.embed_tokens.weight'] def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = BigBirdPegasusModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) 
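# Illustrative decoding sketch for the span-extraction forward defined below (hedged: `model` and `inputs` are placeholder names, and `return_dict=True` is assumed). `qa_outputs` maps each decoder hidden state to two logits, split into `start_logits` and `end_logits` of shape (batch_size, seq_len) over the decoder sequence:
#     outputs = model(**inputs)
#     start = outputs.start_logits.argmax(dim=-1)  # (batch_size,)
#     end = outputs.end_logits.argmax(dim=-1)      # (batch_size,)
# A greedy answer span for sample i is then the decoder tokens from start[i] to end[i] inclusive.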
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.Tensor=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Seq2SeqQuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return (total_loss,) + output if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) class BigBirdPegasusDecoderWrapper(BigBirdPegasusPreTrainedModel): def __init__(self, config): super().__init__(config) self.decoder = BigBirdPegasusDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) class BigBirdPegasusForCausalLM(BigBirdPegasusPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = BigBirdPegasusDecoderWrapper(config) self.lm_head = 
nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs): if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: input_ids = input_ids[:, -1:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/biogpt/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = {'configuration_biogpt': ['BioGptConfig'], 'tokenization_biogpt': ['BioGptTokenizer']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: 
_import_structure['modeling_biogpt'] = ['BioGptForCausalLM', 'BioGptForTokenClassification', 'BioGptForSequenceClassification', 'BioGptModel', 'BioGptPreTrainedModel'] if TYPE_CHECKING: from .configuration_biogpt import BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/biogpt/configuration_biogpt.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class BioGptConfig(PretrainedConfig): model_type = 'biogpt' def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.scale_embedding = scale_embedding self.use_cache = use_cache self.layerdrop = layerdrop self.activation_dropout = activation_dropout super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) # File: transformers-main/src/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() json_indent = 2 class Dictionary: def __init__(self, *, bos='', pad='', eos='', unk='', extra_special_symbols=None): (self.bos_word, self.unk_word, self.pad_word, self.eos_word) = (bos, unk, pad, eos) self.symbols = [] self.count = [] self.indices = {} self.bos_index = self.add_symbol(bos) self.pad_index = self.add_symbol(pad) self.eos_index = self.add_symbol(eos) self.unk_index = self.add_symbol(unk) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(s) self.nspecial = len(self.symbols) def __eq__(self, other): return self.indices == other.indices def __getitem__(self, idx): if idx < len(self.symbols): return self.symbols[idx] return self.unk_word def __len__(self): return len(self.symbols) def __contains__(self, sym): return sym in self.indices @classmethod def load(cls, f): d = cls() d.add_from_file(f) return d def add_symbol(self, word, n=1, overwrite=False): if word in self.indices and (not overwrite): idx = self.indices[word] self.count[idx] = 
self.count[idx] + n return idx else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(n) return idx def _load_meta(self, lines): return 0 def add_from_file(self, f): if isinstance(f, str): try: with open(f, 'r', encoding='utf-8') as fd: self.add_from_file(fd) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f)) return lines = f.readlines() indices_start_line = self._load_meta(lines) for line in lines[indices_start_line:]: try: (line, field) = line.rstrip().rsplit(' ', 1) if field == '#fairseq:overwrite': overwrite = True (line, field) = line.rsplit(' ', 1) else: overwrite = False count = int(field) word = line if word in self and (not overwrite): raise RuntimeError("Duplicate word found when loading Dictionary: '{}'. Duplicate words can overwrite earlier ones by adding the #fairseq:overwrite flag at the end of the corresponding row in the dictionary file. If using the Camembert model, please download an updated copy of the model file.".format(word)) self.add_symbol(word, n=count, overwrite=overwrite) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d): d2 = dict(((re.sub('@@$', '', k), v) if k.endswith('@@') else (re.sub('$', '</w>', k), v) for (k, v) in d.items())) keep_keys = '<s> <pad> </s> <unk>'.split() for k in keep_keys: del d2[f'{k}</w>'] d2[k] = d[k] return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path): if not os.path.exists(biogpt_checkpoint_path): raise ValueError(f'path {biogpt_checkpoint_path} does not exist!') os.makedirs(pytorch_dump_folder_path, exist_ok=True) print(f'Writing results to {pytorch_dump_folder_path}') checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt') if not os.path.isfile(checkpoint_file): raise ValueError(f'path to the file {checkpoint_file} does not exist!') chkpt = torch.load(checkpoint_file, map_location='cpu') args = chkpt['cfg']['model'] dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt') if not os.path.isfile(dict_file): raise ValueError(f'path to the file {dict_file} does not exist!') src_dict = Dictionary.load(dict_file) src_vocab = rewrite_dict_keys(src_dict.indices) src_vocab_size = len(src_vocab) src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file']) print(f'Generating {src_vocab_file} of {src_vocab_size} records') with open(src_vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent)) bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes') if not os.path.isfile(bpecodes_file): raise ValueError(f'path to the file {bpecodes_file} does not exist!') merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file']) shutil.copyfile(bpecodes_file, merges_file) biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json') model_conf = {'activation_dropout': args['activation_dropout'], 'architectures': ['BioGptForCausalLM'], 'attention_probs_dropout_prob': args['attention_dropout'], 'bos_token_id': 0, 'eos_token_id': 2, 'hidden_act': args['activation_fn'], 'hidden_dropout_prob': args['dropout'], 'hidden_size': args['decoder_embed_dim'], 'initializer_range': 0.02, 'intermediate_size': args['decoder_ffn_embed_dim'], 'layer_norm_eps': 1e-12, 'layerdrop': args['decoder_layerdrop'], 'max_position_embeddings': args['max_target_positions'], 'model_type': 'biogpt', 'num_attention_heads': args['decoder_attention_heads'], 'num_hidden_layers': args['decoder_layers'], 'pad_token_id': 1, 'scale_embedding': not args['no_scale_embedding'], 'tie_word_embeddings': args['share_decoder_input_output_embed'], 'vocab_size': src_vocab_size} print(f'Generating {biogpt_model_config_file}') with open(biogpt_model_config_file, 'w', encoding='utf-8') as f: f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent)) biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE) tokenizer_conf = {'bos_token': '<s>', 'eos_token': '</s>', 'model_max_length': 1024, 'pad_token': '<pad>', 'special_tokens_map_file': None, 'tokenizer_class': 'BioGptTokenizer', 'unk_token': '<unk>'} print(f'Generating {biogpt_tokenizer_config_file}') with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8') as f: f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent)) model_state_dict = chkpt['model'] ignore_keys = ['decoder.version'] for k in ignore_keys: model_state_dict.pop(k, None) layer_names = list(model_state_dict.keys()) for layer_name in layer_names: if layer_name.endswith('output_projection.weight'): model_state_dict[layer_name.replace('decoder.', '')] = model_state_dict.pop(layer_name) else: model_state_dict[layer_name.replace('decoder', 'biogpt')] = model_state_dict.pop(layer_name) config = BioGptConfig.from_pretrained(pytorch_dump_folder_path) model_new = BioGptForCausalLM(config) model_new.load_state_dict(model_state_dict) pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME) print(f'Generating {pytorch_weights_dump_path}') torch.save(model_state_dict, pytorch_weights_dump_path) print('Conversion is done!') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--biogpt_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts, bpecodes, etc.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') args = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
# File: transformers-main/src/transformers/models/biogpt/modeling_biogpt.py """""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_biogpt import BioGptConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'microsoft/biogpt' _CONFIG_FOR_DOC = 'BioGptConfig' class BioGptLearnedPositionalEmbedding(nn.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int): self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int=0): attention_mask = attention_mask.long() positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1 positions = positions[:,
past_key_values_length:] return super().forward(positions + self.offset) class BioGptScaledWordEmbedding(nn.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale class BioGptAttention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[BioGptConfig]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + 
attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) class BioGptDecoderLayer(nn.Module): def __init__(self, config: BioGptConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = BioGptAttention(embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=config.attention_probs_dropout_prob, is_decoder=True) self.dropout = config.hidden_dropout_prob self.activation_fn = ACT2FN[config.hidden_act] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class 
BioGptPreTrainedModel(PreTrainedModel): config_class = BioGptConfig base_model_prefix = 'biogpt' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) BIOGPT_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`~BioGptConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BIOGPT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare BioGPT Model transformer outputting raw hidden-states without any specific head on top.', BIOGPT_START_DOCSTRING) class BioGptModel(BioGptPreTrainedModel): def __init__(self, config: BioGptConfig): super().__init__(config) self.config = config self.layerdrop = config.layerdrop self.dropout = config.hidden_dropout_prob self.embed_dim = config.hidden_size self.padding_idx = config.pad_token_id embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 self.embed_tokens = BioGptScaledWordEmbedding(config.vocab_size, self.embed_dim, self.padding_idx, embed_scale=embed_scale) self.embed_positions = BioGptLearnedPositionalEmbedding(config.max_position_embeddings, self.embed_dim) self.layers = nn.ModuleList([BioGptDecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.layer_norm = nn.LayerNorm(self.embed_dim) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input = input_ids input_shape = input.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input) if attention_mask is None: attention_mask = torch.ones((inputs_embeds.shape[0], inputs_embeds.shape[1] + past_key_values_length), dtype=torch.bool, device=inputs_embeds.device) elif attention_mask.shape[1] != past_key_values_length + input_shape[1]: raise ValueError(f'The provided attention mask has length {attention_mask.shape[1]}, but its length should be {past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)') positions = self.embed_positions(attention_mask, past_key_values_length) attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) 
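# Illustrative sketch of the two preceding steps: `positions` comes from the padding-aware cumulative sum computed in BioGptLearnedPositionalEmbedding above, and `attention_mask` has just been expanded by `_prepare_4d_causal_attention_mask` from shape (batch_size, src_len) to a 4D causal mask of shape (batch_size, 1, tgt_len, src_len) with large negative values on padded and future positions.
# Worked example for one sequence with a single padding token, attention_mask = [1, 1, 1, 0]:
#     cumsum(mask) * mask - 1  -> [0, 1, 2, -1]
#     + offset (2)             -> embedding indices [2, 3, 4, 1]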
hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = None next_decoder_cache = () if use_cache else None for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, None, output_attentions, use_cache) else: layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) hidden_states = self.layer_norm(hidden_states) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) @add_start_docstrings('BioGPT Model with a `language modeling` head on top for CLM fine-tuning.', BIOGPT_START_DOCSTRING) class BioGptForCausalLM(BioGptPreTrainedModel): _tied_weights_keys = ['output_projection.weight'] def __init__(self, config): super().__init__(config) self.biogpt = BioGptModel(config) self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_output_embeddings(self): return self.output_projection def set_output_embeddings(self, new_embeddings): self.output_projection = new_embeddings @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.biogpt(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, 
use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.output_projection(sequence_output) lm_loss = None if labels is not None: shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, attention_mask, inputs_embeds=None, past_key_values=None, **kwargs): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if inputs_embeds is not None and past_key_values is None: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids} model_inputs.update({'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache')}) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past @add_start_docstrings('\n BioGPT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', BIOGPT_START_DOCSTRING) class BioGptForTokenClassification(BioGptPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.biogpt = BioGptModel(config) if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None: classifier_dropout = config.classifier_dropout else: classifier_dropout = config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.biogpt(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where(active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n The BioGpt Model transformer with a sequence classification head on top (linear layer).\n\n [`BioGptForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-2) do.\n\n Since it does classification on the last token, it is required to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', BIOGPT_START_DOCSTRING) class BioGptForSequenceClassification(BioGptPreTrainedModel): def __init__(self, config: BioGptConfig): super().__init__(config) self.num_labels = config.num_labels self.biogpt = BioGptModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) self.post_init() @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.biogpt(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: (batch_size, sequence_length) = input_ids.shape[:2] else: (batch_size, sequence_length) = inputs_embeds.shape[:2] if self.config.pad_token_id is None: sequence_length = -1 elif input_ids is not None: sequence_length = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) else: sequence_length = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_length] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def get_input_embeddings(self): return self.biogpt.embed_tokens def set_input_embeddings(self, value): self.biogpt.embed_tokens = value # File: transformers-main/src/transformers/models/biogpt/tokenization_biogpt.py """""" import json import os from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class BioGptTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, unk_token='<unk>', bos_token='<s>', eos_token='</s>', sep_token='</s>', pad_token='<pad>', **kwargs): try: import sacremoses except ImportError: raise ImportError('You need to install sacremoses to use BioGptTokenizer. 
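# Illustrative sketch (not part of the original files): the classification heads above pick
# a loss from `config.problem_type`: MSELoss for regression, CrossEntropyLoss for
# single-label classification, BCEWithLogitsLoss for multi-label classification. A minimal
# standalone version of that dispatch, with hypothetical tensors:
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def classification_loss(problem_type, logits, labels, num_labels):
    if problem_type == 'regression':
        return MSELoss()(logits.squeeze(), labels.squeeze().float())
    if problem_type == 'single_label_classification':
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    if problem_type == 'multi_label_classification':
        return BCEWithLogitsLoss()(logits, labels.float())
    raise ValueError(f'Unknown problem_type: {problem_type}')

logits = torch.randn(4, 3)
loss = classification_loss('single_label_classification', logits, torch.tensor([0, 2, 1, 1]), num_labels=3)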
See https://pypi.org/project/sacremoses/ for installation.') self.lang = 'en' self.sm = sacremoses self.cache_moses_tokenizer = {} self.cache_moses_detokenizer = {} '' with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for (k, v) in self.encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: merges = merges_handle.read().split('\n')[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__(bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, **kwargs) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def moses_tokenize(self, text, lang): if lang not in self.cache_moses_tokenizer: moses_tokenizer = self.sm.MosesTokenizer(lang=lang) self.cache_moses_tokenizer[lang] = moses_tokenizer return self.cache_moses_tokenizer[lang].tokenize(text, aggressive_dash_splits=True, return_str=False, escape=True) def moses_detokenize(self, tokens, lang): if lang not in self.cache_moses_detokenizer: moses_detokenizer = self.sm.MosesDetokenizer(lang=lang) self.cache_moses_detokenizer[lang] = moses_detokenizer return self.cache_moses_detokenizer[lang].detokenize(tokens) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + '</w>',) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + '</w>' while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) if word == '\n  </w>': word = '\n</w>' self.cache[token] = word return word def _tokenize(self, text, bypass_tokenizer=False): if bypass_tokenizer: text = text.split() else: text = self.moses_tokenize(text, self.lang) split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(' '))) return split_tokens def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): tokens = [t.replace(' ', '').replace('</w>', ' ') for t in tokens] tokens = ''.join(tokens).split() text = self.moses_detokenize(tokens, self.lang) return text def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.sep_token_id] + token_ids_0 sep = [self.sep_token_id] return sep + token_ids_0 + sep + token_ids_1 def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) return [1] + [0] * len(token_ids_0) def 
create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] if token_ids_1 is None: return len(token_ids_0 + sep) * [0] return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) def __getstate__(self): state = self.__dict__.copy() state['sm'] = None return state def __setstate__(self, d): self.__dict__ = d try: import sacremoses except ImportError: raise ImportError('You need to install sacremoses to use XLMTokenizer. See https://pypi.org/project/sacremoses/ for installation.') self.sm = sacremoses # File: transformers-main/src/transformers/models/bit/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_bit': ['BitConfig', 'BitOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_bit'] = ['BitForImageClassification', 'BitModel', 'BitPreTrainedModel', 'BitBackbone'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_bit'] = ['BitImageProcessor'] if TYPE_CHECKING: from .configuration_bit import BitConfig, BitOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bit import BitBackbone, BitForImageClassification, BitModel, BitPreTrainedModel try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bit import BitImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) # File: transformers-main/src/transformers/models/bit/configuration_bit.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) class BitConfig(BackboneConfigMixin, PretrainedConfig): model_type = 'bit' layer_types = ['preactivation', 'bottleneck'] supported_padding = ['SAME', 'VALID'] def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type='preactivation', hidden_act='relu', 
global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs): super().__init__(**kwargs) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}") if global_padding is not None: if global_padding.upper() in self.supported_padding: global_padding = global_padding.upper() else: raise ValueError(f'Padding strategy {global_padding} not supported') self.num_channels = num_channels self.embedding_size = embedding_size self.hidden_sizes = hidden_sizes self.depths = depths self.layer_type = layer_type self.hidden_act = hidden_act self.global_padding = global_padding self.num_groups = num_groups self.drop_path_rate = drop_path_rate self.embedding_dynamic_padding = embedding_dynamic_padding self.output_stride = output_stride self.width_factor = width_factor self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)] (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names) # File: transformers-main/src/transformers/models/bit/convert_bit_to_pytorch.py """""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_config(model_name): repo_id = 'huggingface/label-files' filename = 'imagenet-1k-id2label.json' id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} label2id = {v: k for (k, v) in id2label.items()} conv_layer = 'std_conv' if 'bit' in model_name else False config = BitConfig(conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id) return config def rename_key(name): if 'stem.conv' in name: name = name.replace('stem.conv', 'bit.embedder.convolution') if 'blocks' in name: name = name.replace('blocks', 'layers') if 'head.fc' in name: name = name.replace('head.fc', 'classifier.1') if name.startswith('norm'): name = 'bit.' + name if 'bit' not in name and 'classifier' not in name: name = 'bit.encoder.' 
+ name return name def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): config = get_config(model_name) timm_model = create_model(model_name, pretrained=True) timm_model.eval() state_dict = timm_model.state_dict() for key in state_dict.copy().keys(): val = state_dict.pop(key) state_dict[rename_key(key)] = val.squeeze() if 'head' in key else val model = BitForImageClassification(config) model.eval() model.load_state_dict(state_dict) transform = create_transform(**resolve_data_config({}, model=timm_model)) timm_transforms = transform.transforms pillow_resamplings = {'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST} processor = BitImageProcessor(do_resize=True, size={'shortest_edge': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist()) image = prepare_img() timm_pixel_values = transform(image).unsqueeze(0) pixel_values = processor(image, return_tensors='pt').pixel_values assert torch.allclose(timm_pixel_values, pixel_values) with torch.no_grad(): outputs = model(pixel_values) logits = outputs.logits print('Logits:', logits[0, :3]) print('Predicted class:', model.config.id2label[logits.argmax(-1).item()]) timm_logits = timm_model(pixel_values) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(timm_logits, outputs.logits, atol=0.001) print('Looks ok!') if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f'Pushing model {model_name} and processor to the hub') model.push_to_hub(f'ybelkada/{model_name}') processor.push_to_hub(f'ybelkada/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='resnetv2_50x1_bitm', type=str, help="Name of the BiT timm model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub.') args = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/bit/image_processing_bit.py """""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, get_resize_output_image_size, resize, to_channel_dimension_format from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): import 
PIL class BitImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Dict[str, int]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224} crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size') self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: default_to_square = True if 'shortest_edge' in size: size = size['shortest_edge'] default_to_square = False elif 'height' in size and 'width' in size: size = (size['height'], size['width']) else: raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: int=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, param_name='size', default_to_square=False) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = 
do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) all_images = [] for image in images: if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) all_images.append(image) images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors) # File: transformers-main/src/transformers/models/bit/modeling_bit.py """""" import collections import math from typing import Optional, Tuple import numpy as np import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_bit import BitConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'BitConfig' _CHECKPOINT_FOR_DOC = 'google/bit-50' _EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7] _IMAGE_CLASS_CHECKPOINT = 'google/bit-50' _IMAGE_CLASS_EXPECTED_OUTPUT = 'tiger cat' def get_padding_value(padding=None, kernel_size=7, stride=1, dilation=1) -> Tuple[Tuple, bool]: dynamic = False if padding is None: padding = (stride - 1 + dilation * (kernel_size - 1)) // 2 return (padding, dynamic) if isinstance(padding, str): padding = padding.lower() if padding == 'same': if stride == 1 and dilation * (kernel_size - 1) % 2 == 0: padding = (stride - 1 + dilation * (kernel_size - 1)) // 2 else: padding = 0 dynamic = True elif padding == 'valid': padding = 0 else: padding = (stride - 1 + dilation * (kernel_size - 1)) // 2 return (padding, dynamic) class WeightStandardizedConv2d(nn.Conv2d): def __init__(self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', dilation=1, groups=1, bias=False, eps=1e-06): (padding, is_dynamic) = get_padding_value(padding, kernel_size, stride=stride, 
dilation=dilation) super().__init__(in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) if is_dynamic: self.pad = DynamicPad2d(kernel_size, stride, dilation) else: self.pad = None self.eps = eps def forward(self, hidden_state): if self.pad is not None: hidden_state = self.pad(hidden_state) weight = nn.functional.batch_norm(self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0.0, eps=self.eps).reshape_as(self.weight) hidden_state = nn.functional.conv2d(hidden_state, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) return hidden_state class BitGroupNormActivation(nn.GroupNorm): def __init__(self, config, num_channels, eps=1e-05, affine=True, apply_activation=True): super(BitGroupNormActivation, self).__init__(config.num_groups, num_channels, eps=eps, affine=affine) if apply_activation: self.activation = ACT2FN[config.hidden_act] else: self.activation = nn.Identity() def forward(self, hidden_state): hidden_state = nn.functional.group_norm(hidden_state, self.num_groups, self.weight, self.bias, self.eps) hidden_state = self.activation(hidden_state) return hidden_state class DynamicPad2d(nn.Module): def __init__(self, kernel_size, stride, dilation, value=0): super().__init__() if isinstance(kernel_size, int): kernel_size = (kernel_size, kernel_size) if isinstance(stride, int): stride = (stride, stride) if isinstance(dilation, int): dilation = (dilation, dilation) self.kernel_size = kernel_size self.stride = stride self.dilation = dilation self.value = value def compute_padding(x, kernel_size, stride, dilation): return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0) self.compute_padding = compute_padding def __call__(self, input): (input_height, input_width) = input.size()[-2:] padding_height = self.compute_padding(input_height, self.kernel_size[0], self.stride[0], self.dilation[0]) padding_width = self.compute_padding(input_width, self.kernel_size[1], self.stride[1], self.dilation[1]) if padding_height > 0 or padding_width > 0: input = nn.functional.pad(input, [padding_width // 2, padding_width - padding_width // 2, padding_height // 2, padding_height - padding_height // 2], value=self.value) return input class BitMaxPool2d(nn.MaxPool2d): def __init__(self, kernel_size: int, stride=None, dilation=1, ceil_mode=False, padding=(0, 0), padding_value=0, use_dynamic_padding=True): kernel_size = kernel_size if isinstance(kernel_size, collections.abc.Iterable) else (kernel_size, kernel_size) stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride) dilation = dilation if isinstance(dilation, collections.abc.Iterable) else (dilation, dilation) super().__init__(kernel_size, stride, padding, dilation, ceil_mode) if use_dynamic_padding: self.pad = DynamicPad2d(kernel_size, stride, dilation, padding_value) else: self.pad = nn.Identity() def forward(self, hidden_states): hidden_states = self.pad(hidden_states) return nn.functional.max_pool2d(hidden_states, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode) class BitEmbeddings(nn.Module): def __init__(self, config: BitConfig): super().__init__() self.convolution = WeightStandardizedConv2d(config.num_channels, config.embedding_size, kernel_size=7, stride=2, eps=1e-08, padding=config.global_padding) self.pooler = BitMaxPool2d(kernel_size=3, stride=2, use_dynamic_padding=config.embedding_dynamic_padding) if config.global_padding is not None and 
config.global_padding.upper() == 'SAME': self.pad = nn.Identity() else: self.pad = nn.ConstantPad2d(padding=(1, 1, 1, 1), value=0.0) if not config.layer_type == 'preactivation': self.norm = BitGroupNormActivation(config, num_channels=config.embedding_size) else: self.norm = nn.Identity() self.num_channels = config.num_channels def forward(self, pixel_values: Tensor) -> Tensor: num_channels = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') embedding = self.convolution(pixel_values) embedding = self.pad(embedding) embedding = self.norm(embedding) embedding = self.pooler(embedding) return embedding def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor: if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() output = input.div(keep_prob) * random_tensor return output class BitDropPath(nn.Module): def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) def make_div(value, divisor=8): min_value = divisor new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) if new_value < 0.9 * value: new_value += divisor return new_value class BitPreActivationBottleneckLayer(nn.Module): def __init__(self, config, in_channels, out_channels=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, drop_path_rate=0.0, is_first_layer=False): super().__init__() first_dilation = first_dilation or dilation out_channels = out_channels or in_channels mid_channels = make_div(out_channels * bottle_ratio) if is_first_layer: self.downsample = BitDownsampleConv(config, in_channels, out_channels, stride=stride, preact=True) else: self.downsample = None self.norm1 = BitGroupNormActivation(config, in_channels) self.conv1 = WeightStandardizedConv2d(in_channels, mid_channels, 1, eps=1e-08, padding=config.global_padding) self.norm2 = BitGroupNormActivation(config, num_channels=mid_channels) self.conv2 = WeightStandardizedConv2d(mid_channels, mid_channels, 3, stride=stride, groups=groups, eps=1e-08, padding=config.global_padding) self.norm3 = BitGroupNormActivation(config, mid_channels) self.conv3 = WeightStandardizedConv2d(mid_channels, out_channels, 1, eps=1e-08, padding=config.global_padding) self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def forward(self, hidden_states): hidden_states_preact = self.norm1(hidden_states) shortcut = hidden_states if self.downsample is not None: shortcut = self.downsample(hidden_states_preact) hidden_states = self.conv1(hidden_states_preact) hidden_states = self.conv2(self.norm2(hidden_states)) hidden_states = self.conv3(self.norm3(hidden_states)) hidden_states = self.drop_path(hidden_states) return hidden_states + shortcut class BitBottleneckLayer(nn.Module): def __init__(self, config, in_channels, out_channels=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, drop_path_rate=0.0, is_first_layer=False): super().__init__() first_dilation = first_dilation or dilation out_channels = out_channels or 
in_channels mid_chs = make_div(out_channels * bottle_ratio) if is_first_layer: self.downsample = BitDownsampleConv(config, in_channels, out_channels, stride=stride, preact=False) else: self.downsample = None self.conv1 = WeightStandardizedConv2d(in_channels, mid_chs, 1, eps=1e-08, padding=config.global_padding) self.norm1 = BitGroupNormActivation(config, num_channels=mid_chs) self.conv2 = WeightStandardizedConv2d(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups, eps=1e-08, padding=config.global_padding) self.norm2 = BitGroupNormActivation(config, num_channels=mid_chs) self.conv3 = WeightStandardizedConv2d(mid_chs, out_channels, 1, eps=1e-08, padding=config.global_padding) self.norm3 = BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False) self.drop_path = BitDropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.activation = ACT2FN[config.hidden_act] def forward(self, hidden_states): shortcut = hidden_states if self.downsample is not None: shortcut = self.downsample(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = self.norm1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.norm2(hidden_states) hidden_states = self.conv3(hidden_states) hidden_states = self.norm3(hidden_states) hidden_states = self.drop_path(hidden_states) hidden_states = self.activation(hidden_states + shortcut) return hidden_states class BitDownsampleConv(nn.Module): def __init__(self, config, in_channels, out_channels, stride=1, preact=True): super().__init__() self.conv = WeightStandardizedConv2d(in_channels, out_channels, 1, stride=stride, eps=1e-08, padding=config.global_padding) self.norm = nn.Identity() if preact else BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False) def forward(self, x): return self.norm(self.conv(x)) class BitStage(nn.Module): def __init__(self, config, in_channels, out_channels, stride, dilation, depth, bottle_ratio=0.25, layer_dropout=None): super().__init__() first_dilation = 1 if dilation in (1, 2) else 2 if config.layer_type == 'bottleneck': layer_cls = BitBottleneckLayer else: layer_cls = BitPreActivationBottleneckLayer prev_chs = in_channels self.layers = nn.Sequential() for layer_idx in range(depth): (stride, drop_path_rate, is_first_layer) = self._get_updated_hyperparameters(layer_idx, stride, layer_dropout) self.layers.add_module(str(layer_idx), layer_cls(config, prev_chs, out_channels, stride=stride, dilation=dilation, bottle_ratio=bottle_ratio, first_dilation=first_dilation, drop_path_rate=drop_path_rate, is_first_layer=is_first_layer)) prev_chs = out_channels first_dilation = dilation def _get_updated_hyperparameters(self, layer_idx, stride, layer_dropout): if layer_dropout: drop_path_rate = layer_dropout[layer_idx] else: drop_path_rate = 0.0 if layer_idx != 0: stride = 1 is_first_layer = layer_idx == 0 return (stride, drop_path_rate, is_first_layer) def forward(self, input: Tensor) -> Tensor: hidden_state = input for (_, layer) in enumerate(self.layers): hidden_state = layer(hidden_state) return hidden_state class BitEncoder(nn.Module): def __init__(self, config: BitConfig): super().__init__() self.stages = nn.ModuleList([]) prev_chs = config.embedding_size current_stride = 4 dilation = 1 layer_dropouts = [x.tolist() for x in torch.Tensor(np.linspace(0, config.drop_path_rate, sum(config.depths))).split(config.depths)] for (stage_idx, (current_depth, current_hidden_size, layer_dropout)) in enumerate(zip(config.depths, 
config.hidden_sizes, layer_dropouts)): (out_channels, stride, dilation) = self._get_updated_hyperparameters(stage_idx, current_stride, current_hidden_size, dilation, config) stage = BitStage(config, prev_chs, out_channels, stride=stride, dilation=dilation, depth=current_depth, layer_dropout=layer_dropout) prev_chs = out_channels current_stride *= stride self.stages.add_module(str(stage_idx), stage) def _get_updated_hyperparameters(self, stage_idx, current_stride, current_hidden_size, dilation, config): out_channels = make_div(current_hidden_size * config.width_factor) stride = 1 if stage_idx == 0 else 2 if current_stride >= config.output_stride: dilation *= stride stride = 1 return (out_channels, stride, dilation) def forward(self, hidden_state: Tensor, output_hidden_states: bool=False, return_dict: bool=True) -> BaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state,) hidden_state = stage_module(hidden_state) if output_hidden_states: hidden_states = hidden_states + (hidden_state,) if not return_dict: return tuple((v for v in [hidden_state, hidden_states] if v is not None)) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states) class BitPreTrainedModel(PreTrainedModel): config_class = BitConfig base_model_prefix = 'bit' main_input_name = 'pixel_values' _no_split_modules = ['BitEmbeddings'] def _init_weights(self, module): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu') elif isinstance(module, nn.Linear): nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5)) if module.bias is not None: (fan_in, _) = nn.init._calculate_fan_in_and_fan_out(module.weight) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 nn.init.uniform_(module.bias, -bound, bound) elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(module.weight, 1) nn.init.constant_(module.bias, 0) BIT_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`BitConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BIT_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`BitImageProcessor.__call__`]\n for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
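# Illustrative sketch (not part of the original modeling file): WeightStandardizedConv2d
# above standardizes its kernel on every forward pass by running
# `nn.functional.batch_norm` over the weight reshaped to (1, out_channels, -1), which
# normalizes each output filter to zero mean and unit (biased) variance. The check below,
# on made-up toy shapes, shows this is equivalent to a manual per-filter standardization.
import torch
from torch import nn

out_channels, in_channels, k = 8, 3, 3
weight = torch.randn(out_channels, in_channels, k, k)
eps = 1e-6

via_batch_norm = nn.functional.batch_norm(
    weight.reshape(1, out_channels, -1), None, None, training=True, momentum=0.0, eps=eps
).reshape_as(weight)

flat = weight.reshape(out_channels, -1)
manual = (flat - flat.mean(dim=1, keepdim=True)) / torch.sqrt(flat.var(dim=1, unbiased=False, keepdim=True) + eps)
manual = manual.reshape_as(weight)

print(torch.allclose(via_batch_norm, manual, atol=1e-5))  # expected: True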
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare BiT model outputting raw features without any specific head on top.', BIT_START_DOCSTRING) class BitModel(BitPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embedder = BitEmbeddings(config) self.encoder = BitEncoder(config) self.norm = BitGroupNormActivation(config, num_channels=config.hidden_sizes[-1]) if config.layer_type == 'preactivation' else nn.Identity() self.pooler = nn.AdaptiveAvgPool2d((1, 1)) self.post_init() @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BaseModelOutputWithPoolingAndNoAttention: output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict embedding_output = self.embedder(pixel_values) encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.norm(last_hidden_state) pooled_output = self.pooler(last_hidden_state) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states) @add_start_docstrings('\n BiT Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
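# Illustrative sketch (not part of the original modeling file): typical end-to-end use of
# the classes in this file, with BitImageProcessor preparing `pixel_values` and
# BitForImageClassification mapping the pooled features to logits. The checkpoint name
# reuses _IMAGE_CLASS_CHECKPOINT above; the random image is only a placeholder for a real
# picture.
import numpy as np
import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor

image = Image.fromarray(np.uint8(np.random.rand(224, 224, 3) * 255))
processor = BitImageProcessor.from_pretrained('google/bit-50')
model = BitForImageClassification.from_pretrained('google/bit-50')

inputs = processor(image, return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])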
for\n ImageNet.\n ', BIT_START_DOCSTRING) class BitForImageClassification(BitPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bit = BitModel(config) self.classifier = nn.Sequential(nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()) self.post_init() @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> ImageClassifierOutputWithNoAttention: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) @add_start_docstrings('\n BiT backbone, to be used with frameworks like DETR and MaskFormer.\n ', BIT_START_DOCSTRING) class BitBackbone(BitPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.bit = BitModel(config) self.num_features = [config.embedding_size] + config.hidden_sizes self.post_init() @add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states outputs = self.bit(pixel_values, output_hidden_states=True, return_dict=True) hidden_states = outputs.hidden_states feature_maps = () for (idx, stage) in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None) # File: 
transformers-main/src/transformers/models/blenderbot/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_blenderbot': ['BlenderbotConfig', 'BlenderbotOnnxConfig'], 'tokenization_blenderbot': ['BlenderbotTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_blenderbot'] = ['BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_blenderbot'] = ['TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_blenderbot'] = ['FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel'] if TYPE_CHECKING: from .configuration_blenderbot import BlenderbotConfig, BlenderbotOnnxConfig from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/blenderbot/configuration_blenderbot.py """""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... 
import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging logger = logging.get_logger(__name__) class BlenderbotConfig(PretrainedConfig): model_type = 'blenderbot' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, vocab_size=8008, max_position_embeddings=128, encoder_layers=2, encoder_ffn_dim=10240, encoder_attention_heads=32, decoder_layers=24, decoder_ffn_dim=10240, decoder_attention_heads=32, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=2560, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, encoder_no_repeat_ngram_size=3, forced_eos_token_id=2, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, forced_eos_token_id=forced_eos_token_id, **kwargs) class BlenderbotOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ['default', 'seq2seq-lm']: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: common_inputs['decoder_input_ids'] = {0: 'batch'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') elif self.task == 'causal-lm': common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: (_, num_decoder_layers) = self.num_layers for i in range(num_decoder_layers): common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'} common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'} else: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 
'decoder_sequence'})]) return common_inputs @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ['default', 'seq2seq-lm']: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: (num_encoder_layers, _) = self.num_layers for i in range(num_encoder_layers): common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'} common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework) decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair, framework) decoder_inputs = {f'decoder_{name}': tensor for (name, tensor) in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, encoder_seq_length) = common_inputs['input_ids'].shape decoder_seq_length = common_inputs['decoder_input_ids'].shape[1] (num_encoder_attention_heads, num_decoder_attention_heads) = self.num_attention_heads encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads) decoder_past_length = decoder_seq_length decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads) common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1) common_inputs['past_key_values'] = [] (_, num_decoder_layers) = self.num_layers for _ in range(num_decoder_layers): common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape))) return common_inputs def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, seqlen) = common_inputs['input_ids'].shape past_key_values_length = seqlen (_, num_decoder_layers) = self.num_layers (num_encoder_attention_heads, _) = self.num_attention_heads past_shape = (batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads) mask_dtype = common_inputs['attention_mask'].dtype common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1) common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_decoder_layers)] return common_inputs def 
_generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0) token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add) dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) return common_inputs def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: if self.task in ['default', 'seq2seq-lm']: common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) elif self.task == 'causal-lm': common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) else: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) return common_inputs def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ['default', 'seq2seq-lm']: flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t) def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str): if direction not in ['inputs', 'outputs']: raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') name = 'past_key_values' if direction == 'inputs' else 'present' (_, num_decoder_layers) = self.num_layers encoder_sequence = 'past_encoder_sequence' decoder_sequence = 'past_decoder_sequence' if direction == 'inputs' else 'past_decoder_sequence + sequence' for i in range(num_decoder_layers): inputs_or_outputs[f'{name}.{i}.decoder.key'] = {0: 'batch', 2: decoder_sequence} inputs_or_outputs[f'{name}.{i}.decoder.value'] = {0: 'batch', 2: decoder_sequence} inputs_or_outputs[f'{name}.{i}.encoder.key'] = {0: 'batch', 2: encoder_sequence} inputs_or_outputs[f'{name}.{i}.encoder.value'] = {0: 'batch', 2: encoder_sequence} # File: transformers-main/src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py """""" import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) PATTERNS = [['attention', 'attn'], ['encoder_attention', 'encoder_attn'], ['q_lin', 'q_proj'], ['k_lin', 'k_proj'], ['v_lin', 'v_proj'], ['out_lin', 'out_proj'], ['norm_embeddings', 'layernorm_embedding'], ['position_embeddings', 'embed_positions'], ['embeddings', 'embed_tokens'], ['ffn.lin', 'fc']] def rename_state_dict_key(k): if k == 'embeddings.weight': return 'shared.weight' for (parlai_name, hf_name) in PATTERNS: k = k.replace(parlai_name, hf_name) if k.startswith('encoder'): k = 
k.replace('.attn', '.self_attn') k = k.replace('norm1', 'self_attn_layer_norm') k = k.replace('norm2', 'final_layer_norm') elif k.startswith('decoder'): k = k.replace('norm1', 'self_attn_layer_norm') k = k.replace('norm2', 'encoder_attn_layer_norm') k = k.replace('norm3', 'final_layer_norm') return k def rename_layernorm_keys(sd): keys = ['model.encoder.layernorm_embedding.weight', 'model.encoder.layernorm_embedding.bias', 'model.decoder.layernorm_embedding.weight', 'model.decoder.layernorm_embedding.bias'] for k in keys: v = sd.pop(k) new_k = k.replace('layernorm_embedding', 'layer_norm') assert new_k not in sd sd[new_k] = v IGNORE_KEYS = ['START'] @torch.no_grad() def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path): model = torch.load(checkpoint_path, map_location='cpu') sd = model['model'] cfg = BlenderbotConfig.from_json_file(config_json_path) m = BlenderbotForConditionalGeneration(cfg) valid_keys = m.model.state_dict().keys() failures = [] mapping = {} for (k, v) in sd.items(): if k in IGNORE_KEYS: continue new_k = rename_state_dict_key(k) if new_k not in valid_keys: failures.append([k, new_k]) else: mapping[new_k] = v if cfg.normalize_before: rename_layernorm_keys(sd) m.model.load_state_dict(mapping, strict=True) m.half() m.save_pretrained(pytorch_dump_folder_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin') parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.') parser.add_argument('--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use') args = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) # File: transformers-main/src/transformers/models/blenderbot/modeling_blenderbot.py """""" import copy import math import os import warnings from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from ...modeling_utils import PreTrainedModel from ...utils import add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ..blenderbot_small import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel from .configuration_blenderbot import BlenderbotConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'BlenderbotConfig' _CHECKPOINT_FOR_DOC = 'facebook/blenderbot-400M-distill' def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError('self.model.config.pad_token_id has to be defined.') shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class BlenderbotLearnedPositionalEmbedding(nn.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int): super().__init__(num_embeddings, embedding_dim) def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0): (bsz, 
seq_len) = input_ids_shape[:2] positions = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device) return super().forward(positions) class BlenderbotScaledWordEmbedding(nn.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float]=1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale class BlenderbotAttention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[BlenderbotConfig]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 
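# A small sketch (arbitrary sizes) of the reshape done by BlenderbotAttention._shape:
# (batch, seq_len, embed_dim) -> (batch, num_heads, seq_len, head_dim), after which batch and
# head dimensions are folded together so torch.bmm can run per head.
import torch
bsz, seq_len, num_heads, head_dim = 2, 5, 4, 8
hidden = torch.randn(bsz, seq_len, num_heads * head_dim)
per_head = hidden.view(bsz, seq_len, num_heads, head_dim).transpose(1, 2).contiguous()
assert per_head.shape == (bsz, num_heads, seq_len, head_dim)
folded = per_head.view(bsz * num_heads, seq_len, head_dim)  # shape used for torch.bmm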
1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) BLENDERBOT_ATTENTION_CLASSES = {'eager': BlenderbotAttention} class BlenderbotEncoderLayer(nn.Module): def __init__(self, config: BlenderbotConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) (hidden_states, attn_weights, _) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += 
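# A short sketch of the float16 overflow guard applied at the end of the encoder layer: when
# infinities or NaNs are detected in half precision, activations are clamped into the
# representable float16 range, just below the dtype maximum.
import torch
h = torch.tensor([float("inf"), 1.0], dtype=torch.float16)
clamp_value = torch.finfo(h.dtype).max - 1000          # 64504.0 for float16
h = torch.clamp(h, min=-clamp_value, max=clamp_value)  # the inf becomes a finite 64504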
(attn_weights,) return outputs class BlenderbotDecoderLayer(nn.Module): def __init__(self, config: BlenderbotConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> torch.Tensor: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None (hidden_states, cross_attn_weights, cross_attn_present_key_value) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class BlenderbotPreTrainedModel(PreTrainedModel): config_class = BlenderbotConfig base_model_prefix = 'model' supports_gradient_checkpointing = True def _init_weights(self, module): 
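# A sketch (made-up dimensions) of the per-layer cache the decoder layer consumes: a 4-tuple
# (self_attn_key, self_attn_value, cross_attn_key, cross_attn_value); self-attention reads
# past_key_value[:2] and cross-attention reads past_key_value[-2:].
import torch
bsz, heads, past_len, src_len, head_dim = 2, 16, 7, 11, 80
past_key_value = (
    torch.zeros(bsz, heads, past_len, head_dim),  # self-attn keys (decoder length so far)
    torch.zeros(bsz, heads, past_len, head_dim),  # self-attn values
    torch.zeros(bsz, heads, src_len, head_dim),   # cross-attn keys (encoder length)
    torch.zeros(bsz, heads, src_len, head_dim),   # cross-attn values
)
assert past_key_value[:2][0].shape[2] == past_len
assert past_key_value[-2:][0].shape[2] == src_len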
std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = {'attention_mask': input_ids.ne(pad_token), 'input_ids': input_ids, 'decoder_input_ids': input_ids} return dummy_inputs BLENDERBOT_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`BlenderbotConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BLENDERBOT_GENERATION_EXAMPLE = '\n Conversation example:\n\n ```python\n >>> from transformers import AutoTokenizer, BlenderbotForConditionalGeneration\n\n >>> mname = "facebook/blenderbot-400M-distill"\n >>> model = BlenderbotForConditionalGeneration.from_pretrained(mname)\n >>> tokenizer = AutoTokenizer.from_pretrained(mname)\n >>> UTTERANCE = "My friends are cool but they eat too many carbs."\n >>> print("Human: ", UTTERANCE)\n Human: My friends are cool but they eat too many carbs.\n\n >>> inputs = tokenizer([UTTERANCE], return_tensors="pt")\n >>> reply_ids = model.generate(**inputs)\n >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])\n Bot: That\'s unfortunate. Are they trying to lose weight or are they just trying to be healthier?\n\n >>> REPLY = "I\'m not sure"\n >>> print("Human: ", REPLY)\n Human: I\'m not sure\n\n >>> NEXT_UTTERANCE = (\n ... "My friends are cool but they eat too many carbs. That\'s unfortunate. "\n ... "Are they trying to lose weight or are they just trying to be healthier? "\n ... " I\'m not sure."\n ... )\n >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt")\n >>> next_reply_ids = model.generate(**inputs)\n >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])\n Bot: I see. Well, it\'s good that they\'re trying to change their eating habits.\n ```\n' BLENDERBOT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If\n `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,\n 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded\n representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be\n input (see `past_key_values`). This is useful if you want more control over how to convert\n `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value\n of `inputs_embeds`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" class BlenderbotEncoder(BlenderbotPreTrainedModel): def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = BlenderbotScaledWordEmbedding(config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale) self.embed_positions = BlenderbotLearnedPositionalEmbedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([BlenderbotEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = 
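# A plain-torch sketch of what the 4D mask preparation used by the encoder amounts to: a
# (batch, src_len) padding mask becomes an additive (batch, 1, tgt_len, src_len) bias with 0.0
# on visible positions and the dtype minimum on padded ones (toy values; the actual helper is
# transformers' _prepare_4d_attention_mask).
import torch
mask_2d = torch.tensor([[1, 1, 0]])                        # 0 marks padding
dtype = torch.float32
expanded = mask_2d[:, None, None, :].to(dtype)             # (1, 1, 1, 3)
additive_bias = (1.0 - expanded) * torch.finfo(dtype).min  # added to the attention scores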
self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class BlenderbotDecoder(BlenderbotPreTrainedModel): def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = BlenderbotScaledWordEmbedding(config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale) self.embed_positions = BlenderbotLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model) self.layers = nn.ModuleList([BlenderbotDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) positions = self.embed_positions(input_shape, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None next_decoder_cache = () if use_cache else None for (attn_mask, mask_name) in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}.') for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache) else: layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) @add_start_docstrings('The bare Blenderbot Model outputting raw hidden-states without any specific head on top.', BLENDERBOT_START_DOCSTRING) class BlenderbotModel(BlenderbotPreTrainedModel): _tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight'] def __init__(self, config: BlenderbotConfig): super().__init__(config) (padding_idx, vocab_size) = (config.pad_token_id, config.vocab_size) embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.shared = BlenderbotScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale) self.encoder = BlenderbotEncoder(config, self.shared) self.decoder = BlenderbotDecoder(config, self.shared) self.post_init() @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): if pretrained_model_name_or_path == 'facebook/blenderbot-90M': warnings.warn("The checkpoint `facebook/blenderbot-90M` is deprecated. 
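# A small sketch (toy sizes) of how the decoder infers the cached prefix length:
# past_key_values[layer][0] is the self-attention key cache of shape
# (batch, num_heads, past_seq_len, head_dim), so dim 2 gives the position offset used for the
# newly fed decoder tokens.
import torch
past_key_values = ((torch.zeros(2, 16, 7, 80), torch.zeros(2, 16, 7, 80)),)  # 1 layer, 7 cached steps
past_key_values_length = past_key_values[0][0].shape[2]
assert past_key_values_length == 7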
In the future, please use the identical checkpoint `facebook/small_blenderbot-90M` with `BlenderbotSmallModel.from_pretrained('facebook/small_blenderbot-90M')` instead.", FutureWarning) return BlenderbotSmallModel.from_pretrained(pretrained_model_name_or_path) return super(BlenderbotModel, cls).from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[Tuple, BaseModelOutput]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) @add_start_docstrings('The Blenderbot Model with a language modeling head. 
Can be used for summarization.', BLENDERBOT_START_DOCSTRING) class BlenderbotForConditionalGeneration(BlenderbotPreTrainedModel): base_model_prefix = 'model' _keys_to_ignore_on_load_missing = ['final_logits_bias'] _tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config: BlenderbotConfig): super().__init__(config) self.model = BlenderbotModel(config) self.register_buffer('final_logits_bias', torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) self.post_init() @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): if pretrained_model_name_or_path == 'facebook/blenderbot-90M': warnings.warn("The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical checkpoint `facebook/small_blenderbot-90M` with `BlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')` instead.", FutureWarning) return BlenderbotSmallForConditionalGeneration.from_pretrained(pretrained_model_name_or_path) return super(BlenderbotForConditionalGeneration, cls).from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer('final_logits_bias', new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[Tuple, BaseModelOutput]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.') use_cache = False if decoder_input_ids is None and 
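# A sketch of what _resize_final_logits_bias does when the vocabulary grows: the
# (1, old_vocab) bias buffer is right-padded with zeros to (1, new_vocab); shrinking instead
# slices the existing buffer (the sizes below are made up).
import torch
final_logits_bias = torch.zeros(1, 8008)
new_num_tokens = 8010
extra_bias = torch.zeros(1, new_num_tokens - final_logits_bias.shape[-1])
new_bias = torch.cat([final_logits_bias, extra_bias], dim=1)
assert new_bias.shape == (1, new_num_tokens)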
decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] return {'input_ids': None, 'encoder_outputs': encoder_outputs, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])) + layer_past[2:],) return reordered_past class BlenderbotDecoderWrapper(BlenderbotPreTrainedModel): def __init__(self, config): super().__init__(config) self.decoder = BlenderbotDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) class BlenderbotForCausalLM(BlenderbotPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = BlenderbotDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder 
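# A sketch (toy shapes) of the beam-search cache reorder performed by _reorder_cache: every
# cached tensor is re-indexed along dim 0 with beam_idx so each surviving beam carries its own
# past keys/values.
import torch
beam_idx = torch.tensor([1, 1, 0])               # beams selected at this decoding step
past_state = torch.arange(3.0).view(3, 1, 1, 1)  # (num_beams, heads, len, dim)
reordered = past_state.index_select(0, beam_idx)
# reordered.flatten() -> tensor([1., 1., 0.])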
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = self.lm_head(outputs[0]) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs): if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/blenderbot/modeling_flax_blenderbot.py """""" import math import random from functools import partial from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from jax.random import PRNGKey from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, 
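# A sketch of the prompt trimming in prepare_inputs_for_generation: once a cache exists, only
# the tokens not yet covered by past_length are passed to the next forward call (token ids
# below are arbitrary).
import torch
input_ids = torch.tensor([[5, 6, 7, 8]])
past_length = 3                             # decoding steps already cached
if input_ids.shape[1] > past_length:
    input_ids = input_ids[:, past_length:]  # -> tensor([[8]])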
FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_blenderbot import BlenderbotConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'BlenderbotConfig' _CHECKPOINT_FOR_DOC = 'facebook/blenderbot-400M-distill' BLENDERBOT_START_DOCSTRING = '\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a Flax Linen\n [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a\n regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`BlenderbotConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n' BLENDERBOT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n For translation and summarization training, `decoder_input_ids` should be provided. If no\n `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right\n for denoising pre-training following the paper.\n decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n If you want to change padding behavior, you should modify to your needs. 
See diagram 1 in [the\n paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' BLENDERBOT_ENCODE_INPUTS_DOCSTRING = '\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' BLENDERBOT_DECODE_INPUTS_DOCSTRING = '\n Args:\n decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n For translation and summarization training, `decoder_input_ids` should be provided. If no\n `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right\n for denoising pre-training following the paper.\n encoder_outputs (`tuple(tuple(jnp.ndarray)`):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.\n encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the\n paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.\n decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):\n Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast\n auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: shifted_input_ids = jnp.zeros_like(input_ids) shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids class FlaxBlenderbotAttention(nn.Module): config: BlenderbotConfig embed_dim: int num_heads: int dropout: float = 0.0 causal: bool = False bias: bool = True dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') dense = partial(nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) (self.q_proj, self.k_proj, self.v_proj) = (dense(), dense(), dense()) self.out_proj = dense() self.dropout_layer = nn.Dropout(rate=self.dropout) if self.causal: self.causal_mask = make_causal_mask(jnp.ones((1, self.config.max_position_embeddings), dtype='bool'), dtype='bool') def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') cached_key = self.variable('cache', 
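# A minimal sketch of the Flax variant of shift_tokens_right defined above: the same shift is
# expressed with JAX's functional .at[...].set(...) updates instead of in-place tensor writes
# (token ids are made up; 1 is used as decoder_start_token_id, 0 as pad_token_id).
import jax.numpy as jnp
labels = jnp.array([[10, 11, 12, -100]])
shifted = jnp.zeros_like(labels).at[:, 1:].set(labels[:, :-1]).at[:, 0].set(1)
shifted = jnp.where(shifted == -100, 0, shifted)  # -> [[ 1 10 11 12]]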
'cached_key', jnp.zeros, key.shape, key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape, value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, dtype=jnp.int32)) if is_initialized: (*batch_dims, max_length, num_heads, depth_per_head) = cached_key.value.shape cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors pad_mask = jnp.broadcast_to(jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) def __call__(self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray]=None, attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True) -> Tuple[jnp.ndarray]: is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] query_states = self.q_proj(hidden_states) if is_cross_attention: key_states = self.k_proj(key_value_states) value_states = self.v_proj(key_value_states) else: key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) if self.causal: (query_length, key_length) = (query_states.shape[1], key_states.shape[1]) if self.has_variable('cache', 'cached_key'): mask_shift = self.variables['cache']['cache_index'] max_decoder_length = self.variables['cache']['cached_key'].shape[1] causal_mask = lax.dynamic_slice(self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) if self.causal and (self.has_variable('cache', 'cached_key') or init_cache): (key_states, value_states, attention_mask) = self._concatenate_to_cache(key_states, value_states, query_states, attention_mask) if attention_mask is not None: attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng('dropout') attn_weights = dot_product_attention_weights(query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights) class FlaxBlenderbotEncoderLayer(nn.Module): config: BlenderbotConfig 
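# A toy sketch of the cache write done in _concatenate_to_cache: the freshly computed key for
# the current step is written into the pre-allocated max_length buffer at the running cache
# index (all sizes below are made up).
import jax.numpy as jnp
from jax import lax
cached_key = jnp.zeros((1, 8, 2, 4))  # (batch, max_length, num_heads, head_dim)
new_key = jnp.ones((1, 1, 2, 4))      # key for one new decoding step
cur_index = 3
updated = lax.dynamic_update_slice(cached_key, new_key, (0, cur_index, 0, 0))
# positions 0..2 remain zero; position 3 now holds the new key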
dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxBlenderbotAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.encoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.fc1 = nn.Dense(self.config.encoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.fc2 = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, output_attentions: bool=True, deterministic: bool=True) -> Tuple[jnp.ndarray]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class FlaxBlenderbotEncoderLayerCollection(nn.Module): config: BlenderbotConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxBlenderbotEncoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.encoder_layers)] self.layerdrop = self.config.encoder_layerdrop def __call__(self, hidden_states, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for encoder_layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = random.uniform(0, 1) if not deterministic and dropout_probability < self.layerdrop: layer_outputs = (None, None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions, deterministic) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) class FlaxBlenderbotDecoderLayer(nn.Module): config: BlenderbotConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxBlenderbotAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, causal=True, dtype=self.dtype) 
self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.encoder_attn = FlaxBlenderbotAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype) self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.fc1 = nn.Dense(self.config.decoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.fc2 = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, output_attentions: bool=True, deterministic: bool=True) -> Tuple[jnp.ndarray]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) (hidden_states, self_attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) (hidden_states, cross_attn_weights) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs class FlaxBlenderbotDecoderLayerCollection(nn.Module): config: BlenderbotConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxBlenderbotDecoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.decoder_layers)] self.layerdrop = self.config.decoder_layerdrop def __call__(self, hidden_states, attention_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if not deterministic and dropout_probability < self.layerdrop: layer_outputs = (None, None, None) else: layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, 
encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, deterministic=deterministic) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions] if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) class FlaxBlenderbotEncoder(nn.Module): config: BlenderbotConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_source_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0 self.embed_positions = nn.Embed(self.config.max_position_embeddings, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std)) self.layers = FlaxBlenderbotEncoderLayerCollection(self.config, self.dtype) self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, input_ids, attention_mask, position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(position_ids) hidden_states = inputs_embeds + embed_pos hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers(hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_states = outputs[0] last_hidden_states = self.layer_norm(last_hidden_states) hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_states,) if not return_dict: outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=last_hidden_states, hidden_states=hidden_states, attentions=outputs.attentions) class FlaxBlenderbotDecoder(nn.Module): config: BlenderbotConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_target_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0 self.embed_positions = nn.Embed(self.config.max_position_embeddings, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std)) self.layers = FlaxBlenderbotDecoderLayerCollection(self.config, self.dtype) self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, 
output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale positions = self.embed_positions(position_ids) hidden_states = inputs_embeds + positions hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_states = outputs[0] last_hidden_states = self.layer_norm(last_hidden_states) hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_states,) if not return_dict: outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=last_hidden_states, hidden_states=hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) class FlaxBlenderbotModule(nn.Module): config: BlenderbotConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.shared = nn.Embed(self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.init_std), dtype=self.dtype) self.encoder = FlaxBlenderbotEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared) self.decoder = FlaxBlenderbotDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared) def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def __call__(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) class FlaxBlenderbotPreTrainedModel(FlaxPreTrainedModel): config_class = BlenderbotConfig base_model_prefix: str = 'model' module_class: nn.Module = None def __init__(self, config: BlenderbotConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, 
seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') input_ids = input_ids.at[..., -1].set(self.config.eos_token_id) attention_mask = jnp.ones_like(input_ids) decoder_input_ids = input_ids decoder_attention_mask = jnp.ones_like(input_ids) (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length, encoder_outputs): decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4') decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward) return unfreeze(init_variables['cache']) @add_start_docstrings(BLENDERBOT_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BlenderbotConfig) def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, position_ids, **kwargs) return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), 
output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward) @add_start_docstrings(BLENDERBOT_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BlenderbotConfig) def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: dict=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: (batch_size, sequence_length) = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) (batch_size, sequence_length) = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is not None and return_dict: (outputs, past) = outputs outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past) = outputs outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING) def __call__(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, decoder_input_ids: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions 
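# A hedged sketch of how the `encode`/`decode` split above is typically driven. The checkpoint
# name is taken from the docstrings in this file; the rest follows standard transformers usage.
def _demo_encode_decode():
    import jax.numpy as jnp
    from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration

    model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
    tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

    inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="jax")
    # Run the encoder once, then call the decoder; for incremental decoding one would
    # additionally pass `past_key_values` initialised via `init_cache` (see above).
    encoder_outputs = model.encode(**inputs)
    decoder_input_ids = jnp.full((inputs["input_ids"].shape[0], 1), model.config.decoder_start_token_id, dtype="i4")
    outputs = model.decode(decoder_input_ids, encoder_outputs)
    return outputs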
if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if decoder_input_ids is None: decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) if decoder_position_ids is None: (batch_size, sequence_length) = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {'dropout': dropout_rng} if dropout_rng is not None else {} return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs) @add_start_docstrings('The bare Blenderbot Model transformer outputting raw hidden-states without any specific head on top.', BLENDERBOT_START_DOCSTRING) class FlaxBlenderbotModel(FlaxBlenderbotPreTrainedModel): config: BlenderbotConfig dtype: jnp.dtype = jnp.float32 module_class = FlaxBlenderbotModule append_call_sample_docstring(FlaxBlenderbotModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) class FlaxBlenderbotForConditionalGenerationModule(nn.Module): config: BlenderbotConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros def setup(self): self.model = FlaxBlenderbotModule(config=self.config, dtype=self.dtype) self.lm_head = nn.Dense(self.model.shared.num_embeddings, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.final_logits_bias = self.param('final_logits_bias', self.bias_init, (1, self.model.shared.num_embeddings)) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.model.variables['params']['shared']['embedding'] lm_logits = self.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) lm_logits +=
jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype)) if not return_dict: output = (lm_logits,) + outputs[1:] return output return FlaxSeq2SeqLMOutput(logits=lm_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) @add_start_docstrings('The Blenderbot Model with a language modeling head. Can be used for summarization.', BLENDERBOT_START_DOCSTRING) class FlaxBlenderbotForConditionalGeneration(FlaxBlenderbotPreTrainedModel): module_class = FlaxBlenderbotForConditionalGenerationModule dtype: jnp.dtype = jnp.float32 @add_start_docstrings(BLENDERBOT_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BlenderbotConfig) def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: dict=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: (batch_size, sequence_length) = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) (batch_size, sequence_length) = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() outputs = decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = module.model.variables['params']['shared']['embedding'] lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: lm_logits = module.lm_head(hidden_states) lm_logits += module.final_logits_bias return (lm_logits, outputs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, 
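# A minimal sketch of the weight tying used above: when `tie_word_embeddings` is enabled, the
# shared embedding matrix (transposed) is reused as the LM head kernel instead of a separate
# projection. Shapes below are made up for the equivalence check.
import jax
import jax.numpy as jnp
import flax.linen as nn

def _demo_tied_lm_head():
    vocab_size, d_model = 10, 4
    shared_embedding = jax.random.normal(jax.random.PRNGKey(0), (vocab_size, d_model))
    hidden_states = jax.random.normal(jax.random.PRNGKey(1), (2, 3, d_model))

    lm_head = nn.Dense(vocab_size, use_bias=False)
    # Supplying the transposed embedding as the Dense kernel ties input and output embeddings.
    logits = lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
    assert jnp.allclose(logits, hidden_states @ shared_embedding.T, atol=1e-5)
    return logits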
return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is None: (lm_logits, decoder_outputs) = outputs else: ((lm_logits, decoder_outputs), past) = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions) else: outputs = (lm_logits,) + decoder_outputs[1:] if past_key_values is not None and return_dict: outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs def prepare_inputs_for_generation(self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array]=None, decoder_attention_mask: Optional[jax.Array]=None, encoder_outputs=None, **kwargs): (batch_size, seq_length) = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if decoder_attention_mask is not None: position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'encoder_outputs': encoder_outputs, 'encoder_attention_mask': attention_mask, 'decoder_attention_mask': extended_attention_mask, 'decoder_position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['decoder_position_ids'] = model_kwargs['decoder_position_ids'][:, -1:] + 1 return model_kwargs FLAX_BLENDERBOT_CONDITIONAL_GENERATION_DOCSTRING = '\n Returns:\n\n Conversation example::\n\n ```py\n >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration\n\n >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")\n >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")\n\n >>> UTTERANCE = "My friends are cool but they eat too many carbs."\n >>> inputs = tokenizer([UTTERANCE], max_length=1024, return_tensors="np")\n\n >>> # Generate Reply\n >>> reply_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5, early_stopping=True).sequences\n >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in reply_ids])\n ```\n' overwrite_call_docstring(FlaxBlenderbotForConditionalGeneration, BLENDERBOT_INPUTS_DOCSTRING + FLAX_BLENDERBOT_CONDITIONAL_GENERATION_DOCSTRING) append_replace_return_docstrings(FlaxBlenderbotForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) # File: transformers-main/src/transformers/models/blenderbot/modeling_tf_blenderbot.py """""" from __future__ import annotations import os import random import warnings from typing import List, Optional, Tuple, Union import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPastAndCrossAttentions, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, keras, keras_serializable, unpack_inputs from ...tf_utils import 
check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_blenderbot import BlenderbotConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'facebook/blenderbot-400M-distill' _CONFIG_FOR_DOC = 'BlenderbotConfig' LARGE_NEGATIVE = -100000000.0 def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) shifted_input_ids = tf.where(shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), shifted_input_ids) assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int=0): bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int]=None): src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFBlenderbotLearnedPositionalEmbedding(keras.layers.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): super().__init__(num_embeddings, embedding_dim, **kwargs) def call(self, input_shape: tf.TensorShape, past_key_values_length: int=0, position_ids: tf.Tensor | None=None): if position_ids is None: seq_len = input_shape[1] position_ids = tf.range(seq_len, delta=1, name='range') position_ids += past_key_values_length return super().call(tf.cast(position_ids, dtype=tf.int32)) class TFBlenderbotAttention(keras.layers.Layer): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, **kwargs): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='k_proj') self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='q_proj') self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='v_proj') self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='out_proj') def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return 
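# A minimal sketch of the two mask helpers above: both build additive masks, with
# LARGE_NEGATIVE standing in for -inf. The tiny shapes here are made up.
import tensorflow as tf

def _demo_masks():
    # Causal mask for batch size 2, target length 3: strictly-future positions get LARGE_NEGATIVE.
    causal = _make_causal_mask(tf.TensorShape([2, 3]))      # shape (2, 1, 3, 3)
    # Padding mask: 1 = attend, 0 = pad; padded positions become LARGE_NEGATIVE.
    padding = _expand_mask(tf.constant([[1.0, 1.0, 0.0]]))  # shape (1, 1, 3, 3)
    return causal, padding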
tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call(self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None=None, past_key_value: Tuple[Tuple[tf.Tensor]] | None=None, attention_mask: tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor | None]: is_cross_attention = key_value_states is not None (bsz, tgt_len, embed_dim) = shape_list(hidden_states) query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None: key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal(shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}') if attention_mask is not None: tf.debugging.assert_equal(shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}') attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal(shape_list(layer_head_mask), [self.num_heads], message=f'Head mask for a single layer should be of size {self.num_heads}, but is {shape_list(layer_head_mask)}') attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal(shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}') attn_output = tf.transpose(tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return (attn_output, attn_weights, past_key_value) def build(self, input_shape=None): if 
self.built: return self.built = True if getattr(self, 'k_proj', None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, 'q_proj', None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, 'v_proj', None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, 'out_proj', None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFBlenderbotEncoderLayer(keras.layers.Layer): def __init__(self, config: BlenderbotConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFBlenderbotAttention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name='self_attn') self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='self_attn_layer_norm') self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name='fc1') self.fc2 = keras.layers.Dense(self.embed_dim, name='fc2') self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='final_layer_norm') self.config = config def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: Optional[bool]=False): residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) (hidden_states, self_attn_weights, _) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask) tf.debugging.assert_equal(shape_list(hidden_states), shape_list(residual), message=f'Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}') hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return (hidden_states, self_attn_weights) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attn', None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, 'self_attn_layer_norm', None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, 'fc1', None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, 'fc2', None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.encoder_ffn_dim]) if getattr(self, 'final_layer_norm', None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFBlenderbotDecoderLayer(keras.layers.Layer): def __init__(self, config: BlenderbotConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFBlenderbotAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, 
name='self_attn', is_decoder=True) self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='self_attn_layer_norm') self.encoder_attn = TFBlenderbotAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name='encoder_attn', is_decoder=True) self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='encoder_attn_layer_norm') self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name='fc1') self.fc2 = keras.layers.Dense(self.embed_dim, name='fc2') self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='final_layer_norm') self.config = config def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None=None, encoder_hidden_states: tf.Tensor | None=None, encoder_attention_mask: tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, cross_attn_layer_head_mask: tf.Tensor | None=None, past_key_value: Tuple[tf.Tensor] | None=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None (hidden_states, cross_attn_weights, cross_attn_present_key_value) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return (hidden_states, self_attn_weights, cross_attn_weights, present_key_value) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attn', None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, 'self_attn_layer_norm', None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, 'encoder_attn', None) is not None: with tf.name_scope(self.encoder_attn.name): self.encoder_attn.build(None) if getattr(self, 'encoder_attn_layer_norm', None) is not None: with tf.name_scope(self.encoder_attn_layer_norm.name): self.encoder_attn_layer_norm.build([None, None, 
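# A minimal sketch of the per-layer cache layout handled above: a 4-tuple of
# self-attention (key, value) followed by cross-attention (key, value), each of shape
# (bsz, num_heads, seq_len, head_dim). The shapes below are made up.
import tensorflow as tf

def _demo_past_key_value_layout(bsz=1, num_heads=2, seq_len=5, head_dim=4):
    kv = lambda: tf.zeros((bsz, num_heads, seq_len, head_dim))
    past_key_value = (kv(), kv(), kv(), kv())  # (self_k, self_v, cross_k, cross_v)
    # The decoder layer recovers the two halves exactly like this:
    self_attn_past_key_value = past_key_value[:2]
    cross_attn_past_key_value = past_key_value[-2:]
    return self_attn_past_key_value, cross_attn_past_key_value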
self.embed_dim]) if getattr(self, 'fc1', None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, 'fc2', None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.decoder_ffn_dim]) if getattr(self, 'final_layer_norm', None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFBlenderbotPreTrainedModel(TFPreTrainedModel): config_class = BlenderbotConfig base_model_prefix = 'model' BLENDERBOT_START_DOCSTRING = '\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Args:\n config ([`BlenderbotConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' BLENDERBOT_GENERATION_EXAMPLE = '\n Conversation example::\n\n ```py\n >>> from transformers import AutoTokenizer, TFBlenderbotForConditionalGeneration\n\n >>> mname = "facebook/blenderbot-400M-distill"\n >>> model = TFBlenderbotForConditionalGeneration.from_pretrained(mname)\n >>> tokenizer = AutoTokenizer.from_pretrained(mname)\n >>> UTTERANCE = "My friends are cool but they eat too many carbs."\n >>> print("Human: ", UTTERANCE)\n\n >>> inputs = tokenizer([UTTERANCE], return_tensors="tf")\n >>> reply_ids = model.generate(**inputs)\n >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])\n\n >>> REPLY = "I\'m not sure"\n >>> print("Human: ", REPLY)\n >>> NEXT_UTTERANCE = (\n ... "My friends are cool but they eat too many carbs. That\'s unfortunate. "\n ... "Are they trying to lose weight or are they just trying to be healthier? "\n ... " I\'m not sure."\n ... )\n >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="tf")\n >>> next_reply_ids = model.generate(**inputs)\n >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])\n ```\n' BLENDERBOT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`tf.Tensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If\n `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.\n decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tf.FloatTensor`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder, of shape\n `(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.\n past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n use_cache (`bool`, *optional*, defaults to `True`):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`). Set to `False` during training and `True` during generation.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @keras_serializable class TFBlenderbotEncoder(keras.layers.Layer): config_class = BlenderbotConfig '' def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[keras.layers.Embedding]=None, **kwargs): super().__init__(**kwargs) self.config = config self.dropout = keras.layers.Dropout(config.dropout) self.layerdrop = config.encoder_layerdrop self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.embed_tokens = embed_tokens self.embed_positions = TFBlenderbotLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model, name='embed_positions') self.layers = [TFBlenderbotEncoderLayer(config, name=f'layers.{i}') for i in range(config.encoder_layers)] self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='layer_norm') def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens @unpack_inputs def call(self, input_ids=None, inputs_embeds=None, attention_mask=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.dropout(hidden_states, training=training) if attention_mask is not None: attention_mask = _expand_mask(attention_mask) else: attention_mask = None encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: tf.debugging.assert_equal(shape_list(head_mask)[0], len(self.layers), message=f'The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(head_mask)[0]}.') for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) dropout_probability = random.uniform(0, 1) if training and dropout_probability < self.layerdrop: continue (hidden_states, attn) = encoder_layer(hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None) if output_attentions: all_attentions += (attn,) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embed_positions', None) is not None: with tf.name_scope(self.embed_positions.name): 
self.embed_positions.build(None) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFBlenderbotDecoder(keras.layers.Layer): config_class = BlenderbotConfig '' def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[keras.layers.Embedding]=None, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.embed_tokens = embed_tokens self.layerdrop = config.decoder_layerdrop self.embed_positions = TFBlenderbotLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model, name='embed_positions') self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.layers = [TFBlenderbotDecoderLayer(config, name=f'layers.{i}') for i in range(config.decoder_layers)] self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='layer_norm') self.dropout = keras.layers.Dropout(config.dropout) def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens @unpack_inputs def call(self, input_ids=None, inputs_embeds=None, attention_mask=None, position_ids=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if position_ids is None: positions = self.embed_positions(input_shape, past_key_values_length) else: positions = self.embed_positions(input_shape, position_ids=position_ids) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) else: combined_attention_mask = _expand_mask(tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]) if attention_mask is not None: combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1]) if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) hidden_states = hidden_states + positions hidden_states = self.dropout(hidden_states, training=training) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attns = () if output_attentions and encoder_hidden_states is not None else None present_key_values = () if use_cache else None for (attn_mask_name, attn_mask) in [('head_mask', head_mask), ('cross_attn_head_mask', cross_attn_head_mask)]: if attn_mask is not None: 
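# Sanity check: a per-layer head mask (or cross-attention head mask) must supply exactly one
# mask per decoder layer, i.e. its first dimension must equal len(self.layers).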
tf.debugging.assert_equal(shape_list(attn_mask)[0], len(self.layers), message=f'The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for {shape_list(attn_mask)[0]}.') for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None (hidden_states, layer_self_attn, layer_cross_attn, present_key_value) = decoder_layer(hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value) if use_cache: present_key_values += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) if encoder_hidden_states is not None: all_cross_attns += (layer_cross_attn,) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return (hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns) else: return TFBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attns) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embed_positions', None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFBlenderbotMainLayer(keras.layers.Layer): config_class = BlenderbotConfig def __init__(self, config: BlenderbotConfig, **kwargs): super().__init__(**kwargs) self.config = config self.shared = keras.layers.Embedding(input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std), name='model.shared') self.shared.load_weight_prefix = 'model.shared' self.encoder = TFBlenderbotEncoder(config, self.shared, name='encoder') self.decoder = TFBlenderbotDecoder(config, self.shared, name='decoder') def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared @unpack_inputs def call(self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]]=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs): output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, 
head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) elif return_dict and (not isinstance(encoder_outputs, TFBaseModelOutput)): encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) elif not return_dict and (not isinstance(encoder_outputs, tuple)): encoder_outputs = encoder_outputs.to_tuple() decoder_outputs = self.decoder(decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) if not return_dict: return decoder_outputs + encoder_outputs return TFSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True with tf.name_scope(self.shared.load_weight_prefix + '/' + self.shared.name + '/'): self.shared.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'decoder', None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings('The bare BLENDERBOT Model outputting raw hidden-states without any specific head on top.', BLENDERBOT_START_DOCSTRING) class TFBlenderbotModel(TFBlenderbotPreTrainedModel): def __init__(self, config: BlenderbotConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFBlenderbotMainLayer(config, name='model') def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): if pretrained_model_name_or_path == 'facebook/blenderbot-90M': from ..blenderbot_small import TFBlenderbotSmallModel warnings.warn("The checkpoint `facebook/blenderbot-90M` is deprecated. 
In the future, please use the identical checkpoint `facebook/small_blenderbot-90M` with `TFBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')` instead.", FutureWarning) return TFBlenderbotSmallModel.from_pretrained(pretrained_model_name_or_path) return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) @unpack_inputs @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]]=None, past_key_values: List[tf.Tensor] | None=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False, **kwargs) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]: outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqModelOutput(last_hidden_state=output.last_hidden_state, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'model', None) is not None: with tf.name_scope(self.model.name): self.model.build(None) class BiasLayer(keras.layers.Layer): def __init__(self, shape, initializer, trainable, name, **kwargs): super().__init__(name=name, **kwargs) self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) def call(self, x): return x + self.bias @add_start_docstrings('The BLENDERBOT Model with a language modeling head. 
Can be used for summarization.', BLENDERBOT_START_DOCSTRING) class TFBlenderbotForConditionalGeneration(TFBlenderbotPreTrainedModel, TFCausalLanguageModelingLoss): _keys_to_ignore_on_load_unexpected = ['model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight'] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFBlenderbotMainLayer(config, name='model') self.use_cache = config.use_cache self.bias_layer = BiasLayer(name='final_logits_bias', shape=[1, config.vocab_size], initializer='zeros', trainable=False) def get_decoder(self): return self.model.decoder def get_encoder(self): return self.model.encoder def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def get_bias(self): return {'final_logits_bias': self.bias_layer.bias} def set_bias(self, value): vocab_size = value['final_logits_bias'].shape[-1] self.bias_layer = BiasLayer(name='final_logits_bias', shape=[1, vocab_size], initializer='zeros', trainable=False) self.bias_layer.bias.assign(value['final_logits_bias']) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): if pretrained_model_name_or_path == 'facebook/blenderbot-90M': from ..blenderbot_small import TFBlenderbotSmallForConditionalGeneration warnings.warn("The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical checkpoint `facebook/small_blenderbot-90M` with `TFBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')` instead.", FutureWarning) return TFBlenderbotSmallForConditionalGeneration.from_pretrained(pretrained_model_name_or_path) return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) @unpack_inputs @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE) def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]]=None, past_key_values: List[tf.Tensor] | None=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]: if labels is not None: labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels) use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, 
past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqLMOutput(logits=output.logits, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns) def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs): if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] if decoder_attention_mask is not None: decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:] elif past_key_values is not None: decoder_position_ids = past_key_values[0][0].shape[2] else: decoder_position_ids = tf.range(decoder_input_ids.shape[1]) return {'input_ids': None, 'encoder_outputs': encoder_outputs, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'decoder_position_ids': decoder_position_ids, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'model', None) is not None: with tf.name_scope(self.model.name): self.model.build(None) if getattr(self, 'bias_layer', None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) # File: transformers-main/src/transformers/models/blenderbot/tokenization_blenderbot.py """""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = 
logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json'} @lru_cache() def bytes_to_unicode(): bs = list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1)) cs = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class BlenderbotTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, errors='replace', bos_token='', eos_token='', sep_token='', cls_token='', unk_token='', pad_token='', mask_token='', add_prefix_space=False, **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for (k, v) in self.encoder.items()} self.errors = errors self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: bpe_merges = merges_handle.read().split('\n')[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+") super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): vocab = dict(self.encoder).copy() vocab.update(self.added_tokens_encoder) return vocab def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def _tokenize(self, 
text): bpe_tokens = [] for token in re.findall(self.pat, text): token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8'))) bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' '))) return bpe_tokens def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index) def convert_tokens_to_string(self, tokens): text = ''.join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())): text = ' ' + text return (text, kwargs) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None): return token_ids_0 + [self.eos_token_id] # File: transformers-main/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py """""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json'} class BlenderbotTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 
'attention_mask'] slow_tokenizer_class = BlenderbotTokenizer def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='', eos_token='', sep_token='', cls_token='', unk_token='', pad_token='', mask_token='', add_prefix_space=False, trim_offsets=True, **kwargs): mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs) pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space: pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type')) pre_tok_state['add_prefix_space'] = add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) self.add_prefix_space = add_prefix_space tokenizer_component = 'post_processor' tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None) if tokenizer_component_instance: state = json.loads(tokenizer_component_instance.__getstate__()) if 'sep' in state: state['sep'] = tuple(state['sep']) if 'cls' in state: state['cls'] = tuple(state['cls']) changes_to_apply = False if state.get('add_prefix_space', add_prefix_space) != add_prefix_space: state['add_prefix_space'] = add_prefix_space changes_to_apply = True if state.get('trim_offsets', trim_offsets) != trim_offsets: state['trim_offsets'] = trim_offsets changes_to_apply = True if changes_to_apply: component_class = getattr(processors, state.pop('type')) new_value = component_class(**state) setattr(self.backend_tokenizer, tokenizer_component, new_value) @property def mask_token(self) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def mask_token(self, value): value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value self._mask_token = value def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.' return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.' 
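# The add_prefix_space requirement for pretokenized (is_split_into_words=True) input was
# enforced by the assertion above; the actual encoding is delegated to the fast (Rust)
# backend through the parent PreTrainedTokenizerFast implementation.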
return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None): return token_ids_0 + [self.eos_token_id] # File: transformers-main/src/transformers/models/blenderbot_small/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_blenderbot_small': ['BlenderbotSmallConfig', 'BlenderbotSmallOnnxConfig'], 'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_blenderbot_small_fast'] = ['BlenderbotSmallTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_blenderbot_small'] = ['BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_blenderbot_small'] = ['TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_blenderbot_small'] = ['FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel'] if TYPE_CHECKING: from .configuration_blenderbot_small import BlenderbotSmallConfig, BlenderbotSmallOnnxConfig from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # 
File: transformers-main/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py """""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging logger = logging.get_logger(__name__) class BlenderbotSmallConfig(PretrainedConfig): model_type = 'blenderbot-small' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs) class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ['default', 'seq2seq-lm']: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: common_inputs['decoder_input_ids'] = {0: 'batch'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'} common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') elif self.task == 'causal-lm': common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})]) if self.use_past: (num_encoder_layers, _) = self.num_layers for i in range(num_encoder_layers): common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'} common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'} else: common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 
'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})]) return common_inputs @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ['default', 'seq2seq-lm']: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: (num_encoder_layers, _) = self.num_layers for i in range(num_encoder_layers): common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'} common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework) decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair, framework) decoder_inputs = {f'decoder_{name}': tensor for (name, tensor) in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, encoder_seq_length) = common_inputs['input_ids'].shape decoder_seq_length = common_inputs['decoder_input_ids'].shape[1] (num_encoder_attention_heads, num_decoder_attention_heads) = self.num_attention_heads encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads) decoder_past_length = decoder_seq_length + 3 decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads) common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1) common_inputs['past_key_values'] = [] (num_encoder_layers, num_decoder_layers) = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(min_num_layers): common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape))) shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(min_num_layers, max_num_layers): common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, seqlen) = common_inputs['input_ids'].shape past_key_values_length = seqlen + 2 (num_encoder_layers, _) = 
self.num_layers (num_encoder_attention_heads, _) = self.num_attention_heads past_shape = (batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads) mask_dtype = common_inputs['attention_mask'].dtype common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1) common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)] return common_inputs def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0) token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add) dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) return common_inputs def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: if self.task in ['default', 'seq2seq-lm']: common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) elif self.task == 'causal-lm': common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) else: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) return common_inputs def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ['default', 'seq2seq-lm']: flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t) # File: transformers-main/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py """""" import copy import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from ...modeling_utils import PreTrainedModel from ...utils import add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_blenderbot_small import BlenderbotSmallConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'BlenderbotSmallConfig' def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id 
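# At this point every token has been shifted one position to the right and the first position
# holds the decoder start token. Illustrative example: with decoder_start_token_id=1,
# input_ids [[5, 6, 2]] becomes [[1, 5, 6]]. Any -100 label-masking values are replaced with
# pad_token_id below so the result only contains valid token ids.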
if pad_token_id is None: raise ValueError('self.model.config.pad_token_id has to be defined.') shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class BlenderbotSmallLearnedPositionalEmbedding(nn.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int): super().__init__(num_embeddings, embedding_dim) def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0): (bsz, seq_len) = input_ids_shape[:2] positions = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device) return super().forward(positions) class BlenderbotSmallAttention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[BlenderbotSmallConfig]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if 
attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) class BlenderbotSmallEncoderLayer(nn.Module): def __init__(self, config: BlenderbotSmallConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: residual = hidden_states (hidden_states, attn_weights, _) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states 
= torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs BLENDERBOT_SMALL_ATTENTION_CLASSES = {'eager': BlenderbotSmallAttention} class BlenderbotSmallDecoderLayer(nn.Module): def __init__(self, config: BlenderbotSmallConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, is_causal=True, config=config) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = BLENDERBOT_SMALL_ATTENTION_CLASSES[config._attn_implementation](self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None (hidden_states, cross_attn_weights, cross_attn_present_key_value) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs 
+= (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class BlenderbotSmallPreTrainedModel(PreTrainedModel): config_class = BlenderbotSmallConfig base_model_prefix = 'model' supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = {'attention_mask': input_ids.ne(pad_token), 'input_ids': input_ids, 'decoder_input_ids': input_ids} return dummy_inputs BLENDERBOT_SMALL_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`BlenderbotSmallConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BLENDERBOT_SMALL_GENERATION_EXAMPLE = '\n Conversation example:\n\n ```python\n >>> from transformers import AutoTokenizer, BlenderbotSmallForConditionalGeneration\n\n >>> mname = "facebook/blenderbot_small-90M"\n >>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname)\n >>> tokenizer = AutoTokenizer.from_pretrained(mname)\n >>> UTTERANCE = "My friends are cool but they eat too many carbs."\n >>> print("Human: ", UTTERANCE)\n Human: My friends are cool but they eat too many carbs.\n\n >>> inputs = tokenizer([UTTERANCE], return_tensors="pt")\n >>> reply_ids = model.generate(**inputs)\n >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])\n Bot: what kind of carbs do they eat? i don\'t know much about carbs.\n\n >>> REPLY = "I\'m not sure"\n >>> print("Human: ", REPLY)\n Human: I\'m not sure\n\n >>> NEXT_UTTERANCE = (\n ... "My friends are cool but they eat too many carbs.__end__ __start__what kind of carbs do they eat? "\n ... "i don\'t know much about carbs__end__ "\n ... "__start__ I\'m not sure."\n ... )\n >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt")\n >>> next_reply_ids = model.generate(**inputs)\n >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])\n Bot: they eat a lot of carbs. carbs are high in fat, protein, and fats.\n ```\n' BLENDERBOT_SMALL_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If\n `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,\n 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded\n representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be\n input (see `past_key_values`). This is useful if you want more control over how to convert\n `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value\n of `inputs_embeds`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" class BlenderbotSmallEncoder(BlenderbotSmallPreTrainedModel): def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(config.max_position_embeddings, embed_dim) self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False self.post_init() def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = 
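The encoder loop above applies LayerDrop: during training, each encoder layer is skipped with probability `config.encoder_layerdrop`. A standalone sketch of the same sampling rule; the rate and layer count below are invented for illustration:

```python
import torch

layerdrop = 0.1          # illustrative rate; normally config.encoder_layerdrop
num_layers, training = 12, True

kept = []
for idx in range(num_layers):
    # torch.rand([]) draws a scalar in [0, 1); the layer is skipped when it falls below the rate.
    if training and torch.rand([]) < layerdrop:
        continue
    kept.append(idx)

print(f"layers executed on this forward pass: {kept}")
```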
all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel): def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding]=None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model) self.layers = nn.ModuleList([BlenderbotSmallDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) positions = self.embed_positions(input_shape, past_key_values_length) inputs_embeds = self.layernorm_embedding(inputs_embeds) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
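The decoder forward above reads the cache length off the cached key tensor (`past_key_values[0][0].shape[2]`) and offsets the learned position embedding by it, which is what lets incremental decoding feed only the newest token. A rough sketch of that bookkeeping, with dummy shapes that do not come from a real run:

```python
import torch

batch, heads, head_dim = 1, 16, 32
past_len = 9                                  # tokens already decoded and cached
cached_k = torch.zeros(batch, heads, past_len, head_dim)

# Mirrors past_key_values[0][0].shape[2] in the forward pass above.
past_key_values_length = cached_k.shape[2]

# Only the most recent token id is passed in; its position index continues after the cache.
new_token = torch.tensor([[42]])
position_index = past_key_values_length + torch.arange(new_token.shape[1])
print(position_index)                         # tensor([9])
```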
Setting `use_cache=False`...') use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None next_decoder_cache = () if use_cache else None for (attn_mask, mask_name) in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache) else: layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) @add_start_docstrings('The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.', BLENDERBOT_SMALL_START_DOCSTRING) class BlenderbotSmallModel(BlenderbotSmallPreTrainedModel): _tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight'] def __init__(self, config: BlenderbotSmallConfig): super().__init__(config) (padding_idx, vocab_size) = (config.pad_token_id, config.vocab_size) self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = BlenderbotSmallEncoder(config, self.shared) self.decoder = BlenderbotSmallDecoder(config, self.shared) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: 
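In the decoder loop above, the per-layer cache entry is taken from `layer_outputs[3 if output_attentions else 1]`: with `use_cache` on, the layer returns `(hidden_states, self_attn, cross_attn, present)` when attentions are requested and `(hidden_states, present)` otherwise. A tiny sketch of that indexing convention, using placeholder objects instead of tensors:

```python
# Placeholders standing in for real tensors, for illustration only.
hidden, self_attn, cross_attn, present = "h", "sa", "ca", "pkv"

layer_outputs_no_attn = (hidden, present)
layer_outputs_with_attn = (hidden, self_attn, cross_attn, present)

for output_attentions, layer_outputs in [(False, layer_outputs_no_attn), (True, layer_outputs_with_attn)]:
    cache_entry = layer_outputs[3 if output_attentions else 1]
    assert cache_entry == present
```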
Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[Tuple, BaseModelOutput]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) @add_start_docstrings('The BlenderbotSmall Model with a language modeling head. 
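As a quick sanity check of the bare model above, the snippet below instantiates a deliberately tiny, randomly initialized configuration and runs a single forward pass. The config values are arbitrary and do not correspond to the `facebook/blenderbot_small-90M` checkpoint:

```python
import torch
from transformers import BlenderbotSmallConfig, BlenderbotSmallModel

# Tiny, randomly initialized config -- values chosen only to keep the example fast.
config = BlenderbotSmallConfig(
    vocab_size=128, d_model=16, encoder_layers=2, decoder_layers=2,
    encoder_attention_heads=2, decoder_attention_heads=2,
    encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=64,
)
model = BlenderbotSmallModel(config)

# Token ids start at 4 so none of them collide with the special/pad ids.
input_ids = torch.randint(4, config.vocab_size, (1, 6))
decoder_input_ids = torch.randint(4, config.vocab_size, (1, 4))

out = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
print(out.last_hidden_state.shape)  # torch.Size([1, 4, 16])
```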
Can be used for summarization.', BLENDERBOT_SMALL_START_DOCSTRING) class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel): base_model_prefix = 'model' _keys_to_ignore_on_load_missing = ['final_logits_bias'] _tied_weights_keys = ['decoder.embed_tokens.weight', 'encoder.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config: BlenderbotSmallConfig): super().__init__(config) self.model = BlenderbotSmallModel(config) self.register_buffer('final_logits_bias', torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) self._resize_final_logits_bias(new_embeddings.weight.shape[0]) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer('final_logits_bias', new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[Tuple, BaseModelOutput]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning('The `use_cache` argument is changed to `False` since `labels` is provided.') use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, 
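When `labels` are supplied, the generation head above derives `decoder_input_ids` by shifting the labels one position to the right (the `shift_tokens_right` call above) and replaces any `-100` ignore markers with the pad id before feeding the decoder. A compact sketch of that convention with invented token ids and illustrative special-token values:

```python
import torch

pad_token_id, decoder_start_token_id = 0, 1          # illustrative values, not checkpoint defaults
labels = torch.tensor([[45, 71, 2, -100, -100]])      # -100 marks positions ignored by the loss

shifted = labels.new_zeros(labels.shape)
shifted[:, 1:] = labels[:, :-1].clone()
shifted[:, 0] = decoder_start_token_id
shifted.masked_fill_(shifted == -100, pad_token_id)

print(shifted)  # tensor([[ 1, 45, 71,  2,  0]])
```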
return_dict=return_dict) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if decoder_input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = decoder_input_ids.shape[1] - 1 decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] return {'input_ids': None, 'encoder_outputs': encoder_outputs, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])) + layer_past[2:],) return reordered_past class BlenderbotSmallDecoderWrapper(BlenderbotSmallPreTrainedModel): def __init__(self, config): super().__init__(config) self.decoder = BlenderbotSmallDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) class BlenderbotSmallForCausalLM(BlenderbotSmallPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = BlenderbotSmallDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: 
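`_reorder_cache` above follows beam search by re-indexing the cached self-attention tensors along the batch-of-beams axis while leaving the cross-attention tensors untouched. A single-layer toy illustration; the shapes and the beam permutation are invented:

```python
import torch

batch_beams, heads, seq, head_dim = 4, 2, 3, 8
# One legacy layer cache: (self_k, self_v, cross_k, cross_v).
layer_past = tuple(torch.randn(batch_beams, heads, seq, head_dim) for _ in range(4))

beam_idx = torch.tensor([2, 2, 0, 3])   # e.g. beams 0 and 1 were both replaced by beam 2

reordered = tuple(
    past_state.index_select(0, beam_idx) for past_state in layer_past[:2]
) + layer_past[2:]

# The self-attention cache now follows the new beam order ...
assert torch.equal(reordered[0][0], layer_past[0][2])
# ... while the cross-attention tensors are reused unchanged in this seq2seq variant.
assert reordered[2] is layer_past[2]
```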
Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = self.lm_head(outputs[0]) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs): if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py """""" import math import random from functools import partial from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from jax.random import PRNGKey from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring from ...utils import add_start_docstrings, logging, replace_return_docstrings from .configuration_blenderbot_small import BlenderbotSmallConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'facebook/blenderbot_small-90M' _CONFIG_FOR_DOC = 'BlenderbotSmallConfig' BLENDERBOT_SMALL_START_DOCSTRING = '\n This model inherits from [`FlaxPreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a Flax Linen\n [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a\n regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' BLENDERBOT_SMALL_INPUTS_DOCSTRING = '\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n For translation and summarization training, `decoder_input_ids` should be provided. If no\n `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right\n for denoising pre-training following the paper.\n decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n If you want to change padding behavior, you should modify to your needs. 
See diagram 1 in [the\n paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING = '\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING = '\n Args:\n decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n For translation and summarization training, `decoder_input_ids` should be provided. If no\n `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right\n for denoising pre-training following the paper.\n encoder_outputs (`tuple(tuple(jnp.ndarray)`):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of the decoder.\n encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the\n paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.\n decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):\n Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast\n auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: shifted_input_ids = jnp.zeros_like(input_ids) shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids class FlaxBlenderbotSmallAttention(nn.Module): config: BlenderbotSmallConfig embed_dim: int num_heads: int dropout: float = 0.0 causal: bool = False bias: bool = True dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') dense = partial(nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) (self.q_proj, self.k_proj, self.v_proj) = (dense(), dense(), dense()) self.out_proj = dense() self.dropout_layer = nn.Dropout(rate=self.dropout) if self.causal: self.causal_mask = make_causal_mask(jnp.ones((1, self.config.max_position_embeddings), dtype='bool'), dtype='bool') def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') cached_key = 
self.variable('cache', 'cached_key', jnp.zeros, key.shape, key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape, value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, dtype=jnp.int32)) if is_initialized: (*batch_dims, max_length, num_heads, depth_per_head) = cached_key.value.shape cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors pad_mask = jnp.broadcast_to(jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) def __call__(self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray]=None, attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True) -> Tuple[jnp.ndarray]: is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] query_states = self.q_proj(hidden_states) if is_cross_attention: key_states = self.k_proj(key_value_states) value_states = self.v_proj(key_value_states) else: key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) if self.causal: (query_length, key_length) = (query_states.shape[1], key_states.shape[1]) if self.has_variable('cache', 'cached_key'): mask_shift = self.variables['cache']['cache_index'] max_decoder_length = self.variables['cache']['cached_key'].shape[1] causal_mask = lax.dynamic_slice(self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) if self.causal and (self.has_variable('cache', 'cached_key') or init_cache): (key_states, value_states, attention_mask) = self._concatenate_to_cache(key_states, value_states, query_states, attention_mask) if attention_mask is not None: attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng('dropout') attn_weights = dot_product_attention_weights(query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights) class FlaxBlenderbotSmallEncoderLayer(nn.Module): 
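`_concatenate_to_cache` above writes each new key/value slice into a pre-allocated buffer with `lax.dynamic_update_slice` and then advances a scalar cache index. A stripped-down sketch of that update outside Flax's variable machinery; the buffer sizes are invented:

```python
import jax.numpy as jnp
from jax import lax

batch, max_len, heads, head_dim = 1, 8, 2, 4
cached_key = jnp.zeros((batch, max_len, heads, head_dim))
cache_index = jnp.array(0, dtype=jnp.int32)

# One decoding step produces a single new key vector per head.
new_key = jnp.ones((batch, 1, heads, head_dim))

# Write it at position `cache_index` along the length axis, then advance the index.
cached_key = lax.dynamic_update_slice(cached_key, new_key, (0, cache_index, 0, 0))
cache_index = cache_index + new_key.shape[1]

print(cached_key[0, :2, 0, 0])   # [1. 0.] -- only the first slot is filled so far
print(cache_index)               # 1
```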
config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxBlenderbotSmallAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.encoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.fc1 = nn.Dense(self.config.encoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.fc2 = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, output_attentions: bool=True, deterministic: bool=True) -> Tuple[jnp.ndarray]: residual = hidden_states (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class FlaxBlenderbotSmallEncoderLayerCollection(nn.Module): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxBlenderbotSmallEncoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.encoder_layers)] self.layerdrop = self.config.encoder_layerdrop def __call__(self, hidden_states, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for encoder_layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = random.uniform(0, 1) if not deterministic and dropout_probability < self.layerdrop: layer_outputs = (None, None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions, deterministic) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) class FlaxBlenderbotSmallDecoderLayer(nn.Module): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxBlenderbotSmallAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, 
dropout=self.config.attention_dropout, causal=True, dtype=self.dtype) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.encoder_attn = FlaxBlenderbotSmallAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype) self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.fc1 = nn.Dense(self.config.decoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.fc2 = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, output_attentions: bool=True, deterministic: bool=True) -> Tuple[jnp.ndarray]: residual = hidden_states (hidden_states, self_attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states (hidden_states, cross_attn_weights) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs class FlaxBlenderbotSmallDecoderLayerCollection(nn.Module): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxBlenderbotSmallDecoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.decoder_layers)] self.layerdrop = self.config.decoder_layerdrop def __call__(self, hidden_states, attention_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if not deterministic and dropout_probability < self.layerdrop: layer_outputs = (None, None, None) else: 
layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, deterministic=deterministic) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions] if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) class FlaxBlenderbotSmallEncoder(nn.Module): config: BlenderbotSmallConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_source_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0 self.embed_positions = nn.Embed(self.config.max_position_embeddings, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std)) self.layers = FlaxBlenderbotSmallEncoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, input_ids, attention_mask, position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): input_shape = input_ids.shape input_ids = input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(position_ids) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers(hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return outputs return FlaxBaseModelOutput(last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class FlaxBlenderbotSmallDecoder(nn.Module): config: BlenderbotSmallConfig embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 def setup(self): self.dropout_layer = nn.Dropout(rate=self.config.dropout) embed_dim = self.config.d_model self.padding_idx = self.config.pad_token_id self.max_target_positions = self.config.max_position_embeddings self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0 self.embed_positions = nn.Embed(self.config.max_position_embeddings, embed_dim, embedding_init=jax.nn.initializers.normal(self.config.init_std)) self.layers = FlaxBlenderbotSmallDecoderLayerCollection(self.config, self.dtype) self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__(self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): input_shape = input_ids.shape input_ids = 
input_ids.reshape(-1, input_shape[-1]) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale positions = self.embed_positions(position_ids) inputs_embeds = self.layernorm_embedding(inputs_embeds) hidden_states = inputs_embeds + positions hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return outputs return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) class FlaxBlenderbotSmallModule(nn.Module): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.shared = nn.Embed(self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.init_std), dtype=self.dtype) self.encoder = FlaxBlenderbotSmallEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared) self.decoder = FlaxBlenderbotSmallDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared) def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def __call__(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) class FlaxBlenderbotSmallPreTrainedModel(FlaxPreTrainedModel): config_class = BlenderbotSmallConfig base_model_prefix: str = 'model' module_class: nn.Module = None def __init__(self, config: BlenderbotSmallConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') input_ids = input_ids.at[..., -1].set(self.config.eos_token_id) attention_mask = jnp.ones_like(input_ids) decoder_input_ids = input_ids decoder_attention_mask = jnp.ones_like(input_ids) (batch_size, sequence_length) = 
input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length, encoder_outputs): decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4') decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward) return unfreeze(init_variables['cache']) @add_start_docstrings(BLENDERBOT_SMALL_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BlenderbotSmallConfig) def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, position_ids, **kwargs) return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward) @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BlenderbotSmallConfig) def decode(self, decoder_input_ids, encoder_outputs, 
encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: dict=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: (batch_size, sequence_length) = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) (batch_size, sequence_length) = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is not None and return_dict: (outputs, past) = outputs outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past) = outputs outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs def __call__(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, decoder_input_ids: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: (batch_size, sequence_length) = input_ids.shape position_ids = 
jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if decoder_input_ids is None: decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) if decoder_position_ids is None: (batch_size, sequence_length) = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {'dropout': dropout_rng} if dropout_rng is not None else {} return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs) @add_start_docstrings('The bare BlenderbotSmall Model transformer outputting raw hidden-states without any specific head on top.', BLENDERBOT_SMALL_START_DOCSTRING) class FlaxBlenderbotSmallModel(FlaxBlenderbotSmallPreTrainedModel): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 module_class = FlaxBlenderbotSmallModule append_call_sample_docstring(FlaxBlenderbotSmallModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) class FlaxBlenderbotSmallForConditionalGenerationModule(nn.Module): config: BlenderbotSmallConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros def setup(self): self.model = FlaxBlenderbotSmallModule(config=self.config, dtype=self.dtype) self.lm_head = nn.Dense(self.model.shared.num_embeddings, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)) self.final_logits_bias = self.param('final_logits_bias', self.bias_init, (1, self.model.shared.num_embeddings)) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, position_ids=position_ids, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.model.variables['params']['shared']['embedding'] lm_logits = self.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype)) if not return_dict: output = (lm_logits,) + outputs[1:] return output return FlaxSeq2SeqLMOutput(logits=lm_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, 
encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) @add_start_docstrings('The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.', BLENDERBOT_SMALL_START_DOCSTRING) class FlaxBlenderbotSmallForConditionalGeneration(FlaxBlenderbotSmallPreTrainedModel): module_class = FlaxBlenderbotSmallForConditionalGenerationModule dtype: jnp.dtype = jnp.float32 @add_start_docstrings(BLENDERBOT_SMALL_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BlenderbotSmallConfig) def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: dict=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, deterministic: bool=True, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: (batch_size, sequence_length) = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) (batch_size, sequence_length) = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() outputs = decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = module.model.variables['params']['shared']['embedding'] lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: lm_logits = module.lm_head(hidden_states) lm_logits += module.final_logits_bias.astype(self.dtype) return (lm_logits, outputs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is None: (lm_logits, decoder_outputs) = outputs else: ((lm_logits, decoder_outputs), past) = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, 
hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions) else: outputs = (lm_logits,) + decoder_outputs[1:] if past_key_values is not None and return_dict: outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs def prepare_inputs_for_generation(self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array]=None, decoder_attention_mask: Optional[jax.Array]=None, encoder_outputs=None, **kwargs): (batch_size, seq_length) = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if decoder_attention_mask is not None: position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'encoder_outputs': encoder_outputs, 'encoder_attention_mask': attention_mask, 'decoder_attention_mask': extended_attention_mask, 'decoder_position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['decoder_position_ids'] = model_kwargs['decoder_position_ids'][:, -1:] + 1 return model_kwargs FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING = '\n Returns:\n\n Summarization example:\n\n ```py\n >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration\n\n >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")\n >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")\n\n >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np")\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs["input_ids"]).sequences\n >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))\n ```\n\n Mask filling example:\n\n ```py\n >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration\n\n >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")\n >>> TXT = "My friends are but they eat too many carbs."\n\n >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")\n >>> input_ids = tokenizer([TXT], return_tensors="np")["input_ids"]\n >>> logits = model(input_ids).logits\n\n >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()\n >>> probs = jax.nn.softmax(logits[0, masked_index], axis=0)\n >>> values, predictions = jax.lax.top_k(probs)\n\n >>> tokenizer.decode(predictions).split()\n ```\n' overwrite_call_docstring(FlaxBlenderbotSmallForConditionalGeneration, BLENDERBOT_SMALL_INPUTS_DOCSTRING + FLAX_BLENDERBOT_SMALL_CONDITIONAL_GENERATION_DOCSTRING) append_replace_return_docstrings(FlaxBlenderbotSmallForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) # File: transformers-main/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py """""" from __future__ import annotations import random 
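# The helpers defined below follow the usual seq2seq conventions: `shift_tokens_right`
# builds decoder inputs by prepending `decoder_start_token_id`, dropping the last token
# and replacing any -100 label placeholders with `pad_token_id`, while `_make_causal_mask`
# and `_expand_mask` return *additive* masks (0.0 for visible positions, LARGE_NEGATIVE for
# masked ones) that are summed onto the attention logits. A minimal sketch of the shifting
# behaviour, assuming a toy vocabulary where 0 is the pad id and 1 the decoder start id
# (values chosen purely for illustration):
#
#     >>> import tensorflow as tf
#     >>> labels = tf.constant([[5, 6, -100, -100]])
#     >>> shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1)
#     # -> [[1, 5, 6, 0]]  (start token prepended, remaining -100 mapped to the pad id)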
from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPastAndCrossAttentions, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_blenderbot_small import BlenderbotSmallConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'facebook/blenderbot_small-90M' _CONFIG_FOR_DOC = 'BlenderbotSmallConfig' LARGE_NEGATIVE = -100000000.0 def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) shifted_input_ids = tf.where(shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), shifted_input_ids) assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int=0): bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int]=None): src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFBlenderbotSmallLearnedPositionalEmbedding(keras.layers.Embedding): def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): super().__init__(num_embeddings, embedding_dim, **kwargs) def call(self, input_shape: tf.TensorShape, past_key_values_length: int=0, position_ids: tf.Tensor | None=None): if position_ids is None: seq_len = input_shape[1] position_ids = tf.range(seq_len, delta=1, name='range') position_ids += past_key_values_length return super().call(tf.cast(position_ids, dtype=tf.int32)) class TFBlenderbotSmallAttention(keras.layers.Layer): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, **kwargs): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') 
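# Queries are pre-scaled by 1 / sqrt(head_dim) before the dot product with the keys
# (standard scaled dot-product attention), which keeps the attention logits at a roughly
# constant scale regardless of the head size; e.g. head_dim = 64 would give a factor of
# 64 ** -0.5 = 0.125.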
self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='k_proj') self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='q_proj') self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='v_proj') self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='out_proj') def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call(self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None=None, past_key_value: Tuple[Tuple[tf.Tensor]] | None=None, attention_mask: tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor | None]: is_cross_attention = key_value_states is not None (bsz, tgt_len, embed_dim) = shape_list(hidden_states) query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None: key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal(shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}') if attention_mask is not None: tf.debugging.assert_equal(shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}') attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal(shape_list(layer_head_mask), [self.num_heads], message=f'Head mask for a single layer should be of size {self.num_heads}, but is {shape_list(layer_head_mask)}') attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal(shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is 
{shape_list(attn_output)}') attn_output = tf.transpose(tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return (attn_output, attn_weights, past_key_value) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'k_proj', None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, 'q_proj', None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, 'v_proj', None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, 'out_proj', None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFBlenderbotSmallEncoderLayer(keras.layers.Layer): def __init__(self, config: BlenderbotSmallConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFBlenderbotSmallAttention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name='self_attn') self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='self_attn_layer_norm') self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name='fc1') self.fc2 = keras.layers.Dense(self.embed_dim, name='fc2') self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='final_layer_norm') self.config = config def call(self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None, layer_head_mask: tf.Tensor | None, training: Optional[bool]=False) -> tf.Tensor: residual = hidden_states (hidden_states, self_attn_weights, _) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask) tf.debugging.assert_equal(shape_list(hidden_states), shape_list(residual), message=f'Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}') hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) return (hidden_states, self_attn_weights) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attn', None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, 'self_attn_layer_norm', None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, 'fc1', None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, 'fc2', None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, 
self.config.encoder_ffn_dim]) if getattr(self, 'final_layer_norm', None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFBlenderbotSmallDecoderLayer(keras.layers.Layer): def __init__(self, config: BlenderbotSmallConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFBlenderbotSmallAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, name='self_attn', is_decoder=True) self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='self_attn_layer_norm') self.encoder_attn = TFBlenderbotSmallAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name='encoder_attn', is_decoder=True) self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='encoder_attn_layer_norm') self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name='fc1') self.fc2 = keras.layers.Dense(self.embed_dim, name='fc2') self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-05, name='final_layer_norm') self.config = config def call(self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, cross_attn_layer_head_mask: tf.Tensor | None=None, past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: residual = hidden_states self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None (hidden_states, cross_attn_weights, cross_attn_present_key_value) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) return (hidden_states, self_attn_weights, cross_attn_weights, present_key_value) def build(self, input_shape=None): if self.built: return self.built = True if 
getattr(self, 'self_attn', None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, 'self_attn_layer_norm', None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, 'encoder_attn', None) is not None: with tf.name_scope(self.encoder_attn.name): self.encoder_attn.build(None) if getattr(self, 'encoder_attn_layer_norm', None) is not None: with tf.name_scope(self.encoder_attn_layer_norm.name): self.encoder_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, 'fc1', None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, 'fc2', None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.decoder_ffn_dim]) if getattr(self, 'final_layer_norm', None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFBlenderbotSmallPreTrainedModel(TFPreTrainedModel): config_class = BlenderbotSmallConfig base_model_prefix = 'model' BLENDERBOT_SMALL_START_DOCSTRING = '\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Args:\n config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' BLENDERBOT_SMALL_GENERATION_EXAMPLE = '\n Conversation example::\n\n ```py\n >>> from transformers import AutoTokenizer, TFBlenderbotSmallForConditionalGeneration\n\n >>> mname = "facebook/blenderbot_small-90M"\n >>> model = TFBlenderbotSmallForConditionalGeneration.from_pretrained(mname)\n >>> tokenizer = AutoTokenizer.from_pretrained(mname)\n\n >>> UTTERANCE = "My friends are cool but they eat too many carbs."\n >>> print("Human: ", UTTERANCE)\n >>> inputs = tokenizer([UTTERANCE], return_tensors="tf")\n\n >>> reply_ids = model.generate(**inputs)\n >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])\n what kind of carbs do they eat? i don\'t know much about carbs.\n\n >>> REPLY = "I\'m not sure"\n >>> print("Human: ", REPLY)\n >>> NEXT_UTTERANCE = (\n ... "My friends are cool but they eat too many carbs. "\n ... "what kind of carbs do they eat? i don\'t know much about carbs. "\n ... "I\'m not sure."\n ... )\n\n >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="tf")\n >>> inputs.pop("token_type_ids")\n >>> next_reply_ids = model.generate(**inputs)\n >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])\n ```\n' BLENDERBOT_SMALL_INPUTS_DOCSTRING = "\n Args:\n input_ids (`tf.Tensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If\n `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n If not provided, a default mask will be made that ignores pad tokens. It is not recommended to set this for most use cases.\n decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence token in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tf.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n use_cache (`bool`, *optional*, defaults to `True`):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`). Set to `False` during training, `True` during generation.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @keras_serializable class TFBlenderbotSmallEncoder(keras.layers.Layer): config_class = BlenderbotSmallConfig '' def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding]=None, **kwargs): super().__init__(**kwargs) self.config = config self.dropout = keras.layers.Dropout(config.dropout) self.layerdrop = config.encoder_layerdrop self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.embed_tokens = embed_tokens self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model, name='embed_positions') self.layers = [TFBlenderbotSmallEncoderLayer(config, name=f'layers.{i}') for i in range(config.encoder_layers)] self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-05, name='layernorm_embedding') self.embed_dim = config.d_model def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens @unpack_inputs def call(self, input_ids=None, inputs_embeds=None, attention_mask=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout(hidden_states, training=training) if attention_mask is not None: attention_mask = _expand_mask(attention_mask) else: attention_mask = None encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: tf.debugging.assert_equal(shape_list(head_mask)[0], len(self.layers), message=f'The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(head_mask)[0]}.') for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) dropout_probability = random.uniform(0, 1) if training and dropout_probability < self.layerdrop: continue (hidden_states, attn) = encoder_layer(hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None) if output_attentions: all_attentions += (attn,) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 
'embed_positions', None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, 'layernorm_embedding', None) is not None: with tf.name_scope(self.layernorm_embedding.name): self.layernorm_embedding.build([None, None, self.embed_dim]) if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFBlenderbotSmallDecoder(keras.layers.Layer): config_class = BlenderbotSmallConfig '' def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding]=None, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.embed_tokens = embed_tokens self.layerdrop = config.decoder_layerdrop self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model, name='embed_positions') self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.layers = [TFBlenderbotSmallDecoderLayer(config, name=f'layers.{i}') for i in range(config.decoder_layers)] self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-05, name='layernorm_embedding') self.dropout = keras.layers.Dropout(config.dropout) def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens @unpack_inputs def call(self, input_ids=None, inputs_embeds=None, attention_mask=None, position_ids=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) else: combined_attention_mask = _expand_mask(tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]) if attention_mask is not None: combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1]) if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) if position_ids is None: positions = self.embed_positions(input_shape, past_key_values_length) else: positions = self.embed_positions(input_shape, position_ids=position_ids) hidden_states = self.layernorm_embedding(inputs_embeds) + positions hidden_states = self.dropout(hidden_states, training=training) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attns = () if output_attentions and encoder_hidden_states is not None else None present_key_values = () if use_cache else None for (attn_mask_name, 
attn_mask) in [('head_mask', head_mask), ('cross_attn_head_mask', cross_attn_head_mask)]: if attn_mask is not None: tf.debugging.assert_equal(shape_list(attn_mask)[0], len(self.layers), message=f'The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for {shape_list(attn_mask)[0]}.') for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None (hidden_states, layer_self_attn, layer_cross_attn, present_key_value) = decoder_layer(hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value) if use_cache: present_key_values += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) if encoder_hidden_states is not None: all_cross_attns += (layer_cross_attn,) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return (hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns) else: return TFBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attns) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embed_positions', None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, 'layernorm_embedding', None) is not None: with tf.name_scope(self.layernorm_embedding.name): self.layernorm_embedding.build([None, None, self.config.d_model]) if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFBlenderbotSmallMainLayer(keras.layers.Layer): config_class = BlenderbotSmallConfig def __init__(self, config: BlenderbotSmallConfig, **kwargs): super().__init__(**kwargs) self.config = config self.shared = keras.layers.Embedding(input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std), name='model.shared') self.shared.load_weight_prefix = 'model.shared' self.encoder = TFBlenderbotSmallEncoder(config, self.shared, name='encoder') self.decoder = TFBlenderbotSmallDecoder(config, self.shared, name='decoder') def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared @unpack_inputs def call(self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]]=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs): output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states 
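# The encoder is run only when no precomputed `encoder_outputs` are supplied; during
# generation the encoder is executed once and its outputs are passed back in on every
# decoding step, so only the decoder (together with `past_key_values`) does new work here.
# The branches below simply normalise `encoder_outputs` between the plain-tuple and
# `TFBaseModelOutput` forms expected by the requested `return_dict` mode.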
if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) elif return_dict and (not isinstance(encoder_outputs, TFBaseModelOutput)): encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) elif not return_dict and (not isinstance(encoder_outputs, tuple)): encoder_outputs = encoder_outputs.to_tuple() decoder_outputs = self.decoder(decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) if not return_dict: return decoder_outputs + encoder_outputs return TFSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True with tf.name_scope(self.shared.load_weight_prefix + '/' + self.shared.name + '/'): self.shared.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'decoder', None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings('The bare BLENDERBOT_SMALL Model outputting raw hidden-states without any specific head on top.', BLENDERBOT_SMALL_START_DOCSTRING) class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel): def __init__(self, config: BlenderbotSmallConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFBlenderbotSmallMainLayer(config, name='model') def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder @unpack_inputs @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]]=None, past_key_values: List[tf.Tensor] | None=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False, 
**kwargs) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]: outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqModelOutput(last_hidden_state=output.last_hidden_state, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'model', None) is not None: with tf.name_scope(self.model.name): self.model.build(None) class BiasLayer(keras.layers.Layer): def __init__(self, shape, initializer, trainable, name, **kwargs): super().__init__(name=name, **kwargs) self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) def call(self, x): return x + self.bias @add_start_docstrings('The BLENDERBOT_SMALL Model with a language modeling head. 
Can be used for summarization.', BLENDERBOT_SMALL_START_DOCSTRING) class TFBlenderbotSmallForConditionalGeneration(TFBlenderbotSmallPreTrainedModel, TFCausalLanguageModelingLoss): _keys_to_ignore_on_load_unexpected = ['model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight'] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFBlenderbotSmallMainLayer(config, name='model') self.use_cache = config.use_cache self.bias_layer = BiasLayer(name='final_logits_bias', shape=[1, config.vocab_size], initializer='zeros', trainable=False) def get_decoder(self): return self.model.decoder def get_encoder(self): return self.model.encoder def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def get_bias(self): return {'final_logits_bias': self.bias_layer.bias} def set_bias(self, value): vocab_size = value['final_logits_bias'].shape[-1] self.bias_layer = BiasLayer(name='final_logits_bias', shape=[1, vocab_size], initializer='zeros', trainable=False) self.bias_layer.bias.assign(value['final_logits_bias']) @unpack_inputs @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE) def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: List[tf.Tensor] | None=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]: if labels is not None: labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels) use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, 
decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqLMOutput(logits=output.logits, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns) def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs): if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] if decoder_attention_mask is not None: decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:] elif past_key_values is not None: decoder_position_ids = past_key_values[0][0].shape[2] else: decoder_position_ids = tf.range(decoder_input_ids.shape[1]) return {'input_ids': None, 'encoder_outputs': encoder_outputs, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'decoder_position_ids': decoder_position_ids, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'model', None) is not None: with tf.name_scope(self.model.name): self.model.build(None) if getattr(self, 'bias_layer', None) is not None: with tf.name_scope(self.bias_layer.name): self.bias_layer.build(None) # File: transformers-main/src/transformers/models/blenderbot_small/tokenization_blenderbot_small.py """""" import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json'} def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char pairs = set(pairs) return pairs class BlenderbotSmallTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, bos_token='__start__', eos_token='__end__', unk_token='__unk__', pad_token='__null__', **kwargs): with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for (k, v) in 
self.encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: merges = merges_handle.read().split('\n')[1:-1] merges = [tuple(merge.split()) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs) @property def vocab_size(self) -> int: return len(self.encoder) def get_vocab(self) -> Dict: return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token: str) -> str: if token in self.cache: return self.cache[token] token = re.sub('([.,!?()])', ' \\1', token) token = re.sub("(')", ' \\1 ', token) token = re.sub('\\s{2,}', ' ', token) if '\n' in token: token = token.replace('\n', ' __newln__') tokens = token.split(' ') words = [] for token in tokens: if not len(token): continue token = token.lower() word = tuple(token) word = tuple(list(word[:-1]) + [word[-1] + '</w>']) pairs = get_pairs(word) if not pairs: words.append(token) continue while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except ValueError: new_word.extend(word[i:]) break if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = '@@ '.join(word) word = word[:-4] self.cache[token] = word words.append(word) return ' '.join(words) def _tokenize(self, text: str) -> List[str]: split_tokens = [] words = re.findall('\\S+\\n?', text) for token in words: split_tokens.extend(list(self.bpe(token).split(' '))) return split_tokens def _convert_token_to_id(self, token: str) -> int: token = token.lower() return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index: int) -> str: return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens: List[str]) -> str: out_string = ' '.join(tokens).replace('@@ ', '').strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. 
Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) # File: transformers-main/src/transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py """""" from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json'} class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = BlenderbotSmallTokenizer def __init__(self, vocab_file=None, merges_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, trim_offsets=True, **kwargs): super().__init__(ByteLevelBPETokenizer(vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs) self.add_prefix_space = add_prefix_space def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return output return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] # File: transformers-main/src/transformers/models/blip/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available _import_structure = {'configuration_blip': ['BlipConfig', 'BlipTextConfig', 'BlipVisionConfig'], 'processing_blip': ['BlipProcessor']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_blip'] = ['BlipImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_blip'] = ['BlipModel', 'BlipPreTrainedModel', 'BlipForConditionalGeneration', 'BlipForQuestionAnswering', 'BlipVisionModel', 'BlipTextModel', 'BlipForImageTextRetrieval'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_blip'] = ['TFBlipModel', 'TFBlipPreTrainedModel', 'TFBlipForConditionalGeneration', 'TFBlipForQuestionAnswering', 'TFBlipVisionModel', 'TFBlipTextModel', 'TFBlipForImageTextRetrieval'] if TYPE_CHECKING: from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import BlipForConditionalGeneration, BlipForImageTextRetrieval, 
BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/blip/configuration_blip.py """""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class BlipTextConfig(PretrainedConfig): model_type = 'blip_text_model' def __init__(self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act='gelu', layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, label_smoothing=0.0, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.encoder_hidden_size = encoder_hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.hidden_dropout_prob = hidden_dropout_prob self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.attention_probs_dropout_prob = attention_probs_dropout_prob self.is_decoder = is_decoder self.use_cache = use_cache self.label_smoothing = label_smoothing @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'blip': config_dict = config_dict['text_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class BlipVisionConfig(PretrainedConfig): model_type = 'blip_vision_model' def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act='gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=1e-10, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'blip': config_dict = config_dict['vision_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class BlipConfig(PretrainedConfig): model_type = 'blip' def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, label_smoothing=0.0, **kwargs): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.') if vision_config is None: vision_config = {} logger.info('`vision_config` is `None`. 
Initializing the `BlipVisionConfig` with default values.') self.text_config = BlipTextConfig(**text_config) self.vision_config = BlipVisionConfig(**vision_config) self.text_config.encoder_hidden_size = self.vision_config.hidden_size self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = 1.0 self.initializer_range = 0.02 self.image_text_hidden_size = image_text_hidden_size self.label_smoothing = label_smoothing @classmethod def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs): return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) # File: transformers-main/src/transformers/models/blip/convert_blip_original_pytorch_to_hf.py import argparse import re import requests import torch from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering def load_demo_image(image_size, device): img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB') transform = transforms.Compose([transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))]) image = transform(raw_image).unsqueeze(0).to(device) return image def rename_key(key): if 'visual_encoder' in key: key = re.sub('visual_encoder*', 'vision_model.encoder', key) if 'blocks' in key: key = re.sub('blocks', 'layers', key) if 'attn' in key: key = re.sub('attn', 'self_attn', key) if 'norm1' in key: key = re.sub('norm1', 'layer_norm1', key) if 'norm2' in key: key = re.sub('norm2', 'layer_norm2', key) if 'encoder.norm' in key: key = re.sub('encoder.norm', 'post_layernorm', key) if 'encoder.patch_embed.proj' in key: key = re.sub('encoder.patch_embed.proj', 'embeddings.patch_embedding', key) if 'encoder.pos_embed' in key: key = re.sub('encoder.pos_embed', 'embeddings.position_embedding', key) if 'encoder.cls_token' in key: key = re.sub('encoder.cls_token', 'embeddings.class_embedding', key) if 'self_attn' in key: key = re.sub('self_attn.proj', 'self_attn.projection', key) return key @torch.no_grad() def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None): if config_path is not None: config = BlipConfig.from_pretrained(config_path) else: config = BlipConfig(projection_dim=512, text_config={}, vision_config={}) hf_model = BlipForConditionalGeneration(config).eval() model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='base') pt_model = pt_model.eval() modified_state_dict = pt_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_model.load_state_dict(modified_state_dict) image_size = 384 image = load_demo_image(image_size=image_size, device='cpu') tokenizer = BertTokenizer.from_pretrained('google-bert/bert-base-uncased') input_ids = tokenizer(['a picture of']).input_ids out = hf_model.generate(image, input_ids) 
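# The asserts below are sanity checks for the conversion: the converted HF captioning model generates a caption
# for the demo image, once continuing the 'a picture of' prompt and once unprompted, and the resulting token ids
# must match the hard-coded reference ids (presumably recorded from the original BLIP implementation). Id 102 is
# BERT's [SEP] token, which BLIP uses as its EOS (`eos_token_id=self.config.text_config.sep_token_id` in
# `generate`), and id 30522 is the decoder BOS token added just past the 30,522-entry bert-base-uncased vocabulary
# (`bos_token_id=30522` in `BlipTextConfig`), which is presumably why the plain BERT tokenizer renders it as
# '[UNK]' in the VQA check further down. A minimal, optional human-readable view of the same output (not part of
# the original script) would be:
# print(tokenizer.decode(out[0]))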
assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] out = hf_model.generate(image) assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(pytorch_dump_folder_path) model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='base') vqa_model.eval() modified_state_dict = vqa_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_vqa_model = BlipForQuestionAnswering(config) hf_vqa_model.load_state_dict(modified_state_dict) question = ['How many dogs are in this image?'] question_input_ids = tokenizer(question, return_tensors='pt').input_ids answer = hf_vqa_model.generate(question_input_ids, image) print(tokenizer.decode(answer[0])) assert tokenizer.decode(answer[0]) == '[UNK] 1 [SEP]' if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa') model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='base') itm_model.eval() modified_state_dict = itm_model.state_dict() for key in modified_state_dict.copy(): value = modified_state_dict.pop(key) renamed_key = rename_key(key) modified_state_dict[renamed_key] = value hf_itm_model = BlipForImageTextRetrieval(config) question = ['A picture of a woman with a dog sitting in a beach'] question_input_ids = tokenizer(question, return_tensors='pt', padding='max_length', truncation=True, max_length=35).input_ids hf_itm_model.load_state_dict(modified_state_dict) hf_itm_model.eval() out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True) out = hf_itm_model(question_input_ids, image, use_itm_head=False) assert out[0].item() == 0.2110687494277954 assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') args = parser.parse_args() convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path) # File: transformers-main/src/transformers/models/blip/image_processing_blip.py """""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) class BlipImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, 
int]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 384, 'width': 384} size = get_size_dict(size, default_to_square=True) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size) if 'height' not in size or 'width' not in size: raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}') output_size = (size['height'], size['width']) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, do_convert_rgb: bool=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors) return encoded_outputs # File: transformers-main/src/transformers/models/blip/modeling_blip.py """""" import warnings from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn.functional import normalize from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig from .modeling_blip_text import BlipTextLMHeadModel, BlipTextModel logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'Salesforce/blip-vqa-base' def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def blip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass class BlipForConditionalGenerationModelOutput(ModelOutput): loss: Optional[Tuple[torch.FloatTensor]] = None logits: Optional[Tuple[torch.FloatTensor]] = None image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @property def decoder_logits(self): warnings.warn('`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers. 
Please use the `logits` attribute to retrieve the final output instead.', FutureWarning) return self.logits @dataclass class BlipTextVisionModelOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BlipImageTextMatchingModelOutput(ModelOutput): itm_score: Optional[torch.FloatTensor] = None loss: Optional[torch.FloatTensor] = None image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None vision_pooler_output: Optional[torch.FloatTensor] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None question_embeds: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BlipOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) class BlipVisionEmbeddings(nn.Module): def __init__(self, config: BlipVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] - 1 num_positions = self.position_embedding.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embedding class_pos_embed = self.position_embedding[:, :1] patch_pos_embed = self.position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: (batch_size, _, height, width) = pixel_values.shape target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: position_embedding = 
self.interpolate_pos_encoding(embeddings, height, width) else: position_embedding = self.position_embedding embeddings = embeddings + position_embedding[:, :embeddings.size(1), :].to(target_dtype) return embeddings class BlipTextEmbeddings(nn.Module): def __init__(self, config: BlipTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class BlipAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = nn.Dropout(config.attention_dropout) self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim) self.projection = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, tgt_len, embed_dim) = hidden_states.size() mixed_qkv = self.qkv(hidden_states).reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(2, 0, 3, 1, 4) (query_states, key_states, value_states) = (mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]) attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) attention_scores = attention_scores * self.scale attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3) new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,) context_layer = context_layer.reshape(new_context_layer_shape) output = self.projection(context_layer) outputs = (output, attention_probs) if output_attentions else (output, None) return outputs class BlipMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class 
BlipEncoderLayer(nn.Module): def __init__(self, config: BlipConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = BlipAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = BlipMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, head_mask=attention_mask, output_attentions=output_attentions) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = hidden_states + residual outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class BlipPreTrainedModel(PreTrainedModel): config_class = BlipConfig base_model_prefix = 'blip' supports_gradient_checkpointing = True def _init_weights(self, module): factor = self.config.initializer_range if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=factor) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() if isinstance(module, BlipVisionEmbeddings): if hasattr(self.config, 'vision_config'): factor = self.config.vision_config.initializer_range nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor) nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BLIP_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`BlipConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BLIP_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' BLIP_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n Whether to interpolate the pre-trained position encodings.\n' BLIP_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n Whether to interpolate the pre-trained position encodings.\n' class BlipEncoder(nn.Module): def __init__(self, config: BlipConfig): super().__init__() self.config = config self.layers = nn.ModuleList([BlipEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class BlipVisionModel(BlipPreTrainedModel): main_input_name = 'pixel_values' config_class = BlipVisionConfig def __init__(self, config: BlipVisionConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.embeddings = BlipVisionEmbeddings(config) self.encoder = BlipEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.post_init() @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=BlipVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = 
encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def get_input_embeddings(self): return self.embeddings @add_start_docstrings('\n This model is going to be deprecated in future versions. Please use `BlipForConditionalGeneration`, `BlipForQuestionAnswering` or `BlipForImageTextRetrieval` depending on your usecase.\n ', BLIP_START_DOCSTRING) class BlipModel(BlipPreTrainedModel): config_class = BlipConfig def __init__(self, config: BlipConfig): super().__init__(config) if not isinstance(config.text_config, BlipTextConfig): raise TypeError(f'config.text_config is expected to be of type BlipTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.vision_config, BlipVisionConfig): raise TypeError(f'config.vision_config is expected to be of type BlipVisionConfig but is of type {type(config.vision_config)}.') text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = BlipTextModel(text_config) self.vision_model = BlipVisionModel(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) logger.warning('`BlipModel` is going to be deprecated in future release, please use `BlipForConditionalGeneration`, `BlipForQuestionAnswering` or `BlipForImageTextRetrieval` depending on your usecase.') self.post_init() @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING) def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, return_dict=return_dict) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> torch.FloatTensor: return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) pooled_output = vision_outputs[1] image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING) def get_multimodal_features(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> torch.FloatTensor: return_dict = return_dict if 
return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=True, output_hidden_states=True, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=return_dict) pooled_output = text_outputs[1] multimodal_features = self.text_projection(pooled_output) return multimodal_features @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BlipOutput, config_class=BlipConfig) def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, BlipOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = blip_loss(logits_per_text) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return (loss,) + output if loss is not None else output return BlipOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) @add_start_docstrings('\n BLIP Model for image captioning. The model consists of a vision encoder and a text decoder. One can optionally pass\n `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt. Otherwise,\n the decoder starts generating text from the [BOS] (beginning-of-sequence) token. will start generating the caption\n from the text input. 
If no text input is provided, the decoder will start with the [BOS] token only.\n ', BLIP_START_DOCSTRING) class BlipForConditionalGeneration(BlipPreTrainedModel): config_class = BlipConfig _tied_weights_keys = ['text_decoder.cls.predictions.decoder.bias'] main_input_name = 'pixel_values' def __init__(self, config: BlipConfig): super().__init__(config) self.vision_model = BlipVisionModel(config.vision_config) self.text_decoder = BlipTextLMHeadModel(config.text_config) self.decoder_input_ids = config.text_config.bos_token_id self.decoder_pad_token_id = config.text_config.pad_token_id self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BlipForConditionalGenerationModelOutput, config_class=BlipVisionConfig) def forward(self, pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, BlipForConditionalGenerationModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] outputs = self.text_decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, labels=labels, return_dict=return_dict, reduction='mean') if not return_dict: outputs = (outputs[0], outputs[1], image_embeds, vision_outputs[0]) + vision_outputs[2:] return tuple((output for output in outputs if output is not None)) return BlipForConditionalGenerationModelOutput(loss=outputs.loss, logits=outputs.logits, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions) @torch.no_grad() def generate(self, pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor: batch_size = pixel_values.shape[0] vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device) if isinstance(input_ids, list): input_ids = torch.LongTensor(input_ids) elif input_ids is None: input_ids = torch.LongTensor([[self.decoder_input_ids, self.config.text_config.eos_token_id]]).repeat(batch_size, 1).to(image_embeds.device) input_ids[:, 0] = self.config.text_config.bos_token_id attention_mask = attention_mask[:, :-1] if attention_mask is not None else None outputs = self.text_decoder.generate(input_ids=input_ids[:, :-1], eos_token_id=self.config.text_config.sep_token_id, pad_token_id=self.config.text_config.pad_token_id, attention_mask=attention_mask, 
encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, **generate_kwargs) return outputs @add_start_docstrings('\n BLIP Model for visual question answering. The model consists of a vision encoder, a text encoder as well as a text\n decoder. The vision encoder will encode the input image, the text encoder will encode the input question together\n with the encoding of the image, and the text decoder will output the answer to the question.\n ', BLIP_START_DOCSTRING) class BlipForQuestionAnswering(BlipPreTrainedModel): config_class = BlipConfig _tied_weights_keys = ['text_decoder.cls.predictions.decoder.bias'] def __init__(self, config: BlipConfig): super().__init__(config) self.vision_model = BlipVisionModel(config.vision_config) self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False) self.text_decoder = BlipTextLMHeadModel(config.text_config) self.decoder_pad_token_id = config.text_config.pad_token_id self.decoder_start_token_id = config.text_config.bos_token_id self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig) def forward(self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, BlipTextVisionModelOutput]: if labels is None and decoder_input_ids is None: raise ValueError('Either `decoder_input_ids` or `labels` should be passed when calling `forward` with `BlipForQuestionAnswering`. 
If you are training the model, make sure that `labels` is passed; if you are using the model for inference, make sure that `decoder_input_ids` is passed or call `generate`') return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long) question_embeds = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=return_dict) if labels is not None and decoder_input_ids is None: decoder_input_ids = labels question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state answer_output = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=question_embeds, encoder_attention_mask=attention_mask, labels=labels, return_dict=return_dict, reduction='mean') if labels is not None: decoder_loss = answer_output.loss.mean() if return_dict else answer_output[0].mean() else: decoder_loss = None if not return_dict: outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:] return tuple((output for output in outputs if output is not None)) return BlipTextVisionModelOutput(loss=decoder_loss, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions) @torch.no_grad() def generate(self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor: vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device) if isinstance(input_ids, list): input_ids = torch.LongTensor(input_ids) question_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=False) question_embeds = question_outputs[0] question_attention_mask = torch.ones(question_embeds.size()[:-1], dtype=torch.long).to(question_embeds.device) bos_ids = torch.full((question_embeds.size(0), 1), fill_value=self.decoder_start_token_id, device=question_embeds.device) outputs = self.text_decoder.generate(input_ids=bos_ids, eos_token_id=self.config.text_config.sep_token_id, pad_token_id=self.config.text_config.pad_token_id, encoder_hidden_states=question_embeds, encoder_attention_mask=question_attention_mask, **generate_kwargs) return outputs @add_start_docstrings('\n BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context of\n image-text retrieval. 
Given an image and a text, the model returns the probability of the text being relevant to\n the image.\n ', BLIP_START_DOCSTRING) class BlipForImageTextRetrieval(BlipPreTrainedModel): config_class = BlipConfig def __init__(self, config: BlipConfig): super().__init__(config) self.vision_model = BlipVisionModel(config.vision_config) self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False) self.vision_proj = nn.Linear(config.vision_config.hidden_size, config.image_text_hidden_size) self.text_proj = nn.Linear(config.text_config.hidden_size, config.image_text_hidden_size) self.itm_head = nn.Linear(config.text_config.hidden_size, 2) self.decoder_pad_token_id = config.text_config.pad_token_id if not hasattr(config, 'decoder_pad_token_id') else config.decoder_pad_token_id self.decoder_start_token_id = config.text_config.bos_token_id if not hasattr(config, 'decoder_start_token_id') else config.decoder_start_token_id self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig) def forward(self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, use_itm_head: Optional[bool]=True, attention_mask: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, BlipTextVisionModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long) if use_itm_head: question_embeds = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=return_dict) question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state output = self.itm_head(question_embeds[:, 0, :]) else: question_embeds = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, return_dict=return_dict) question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state image_feat = normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1) text_feat = normalize(self.text_proj(question_embeds[:, 0, :]), dim=-1) output = image_feat @ text_feat.t() if not return_dict: outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,) return tuple((output for output in outputs if output is not None)) return BlipImageTextMatchingModelOutput(itm_score=output, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions, question_embeds=question_embeds) # File: transformers-main/src/transformers/models/blip/modeling_blip_text.py import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import Tensor, 
device, nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import logging from .configuration_blip import BlipTextConfig logger = logging.get_logger(__name__) class BlipTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.config = config def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if inputs_embeds is None: input_ids = input_ids.to(self.word_embeddings.weight.device) inputs_embeds = self.word_embeddings(input_ids) embeddings = inputs_embeds if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BlipTextSelfAttention(nn.Module): def __init__(self, config, is_cross_attention): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size) self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = 
attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask.to(attention_scores.device) attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs_dropped = self.dropout(attention_probs) if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs class BlipTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = 
nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BlipTextAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.self = BlipTextSelfAttention(config, is_cross_attention) self.output = BlipTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class BlipTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BlipTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BlipTextLayer(nn.Module): def __init__(self, config, layer_num): super().__init__() self.config = config self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BlipTextAttention(config) self.layer_num = layer_num if self.config.is_decoder: self.crossattention = BlipTextAttention(config, is_cross_attention=self.config.is_decoder) self.intermediate = BlipTextIntermediate(config) self.output = BlipTextOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: 
Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BlipTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BlipTextLayer(config, i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: if self.gradient_checkpointing and self.training: if use_cache: logger.warning('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.is_decoder else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class BlipTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BlipTextPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BlipTextLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BlipTextPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BlipTextOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BlipTextLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class BlipTextPreTrainedModel(PreTrainedModel): 
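# --- Hedged illustration (added, not part of the original source): the key/value cache pattern used by
# --- BlipTextSelfAttention above during incremental decoding. Cached projections are concatenated with the
# --- projection of the newest token along dim=2, the sequence axis of the (batch, heads, seq, head_dim) layout.
import torch

(batch, heads, head_dim) = (2, 12, 64)
past_key = torch.randn(batch, heads, 5, head_dim)  # keys cached from the 5 tokens decoded so far
new_key = torch.randn(batch, heads, 1, head_dim)  # key projection computed for the current token only
key_states = torch.cat([past_key, new_key], dim=2)  # same concatenation as in BlipTextSelfAttention.forward
assert key_states.shape == (batch, heads, 6, head_dim)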
config_class = BlipTextConfig base_model_prefix = 'bert' _no_split_modules = [] def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class BlipTextModel(BlipTextPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BlipTextEmbeddings(config) self.encoder = BlipTextEncoder(config) self.pooler = BlipTextPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: if is_decoder: (batch_size, seq_length) = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] causal_mask = torch.cat([torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), causal_mask], axis=-1) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError('Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(input_shape, attention_mask.shape)) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, is_decoder: Optional[bool]=False) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: 
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() (batch_size, seq_length) = input_shape device = input_ids.device elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] (batch_size, seq_length) = input_shape device = inputs_embeds.device elif encoder_embeds is not None: input_shape = encoder_embeds.size()[:-1] (batch_size, seq_length) = input_shape device = encoder_embeds.device else: raise ValueError('You have to specify either input_ids or inputs_embeds or encoder_embeds') past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length)).to(device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device, is_decoder) if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states[0].size() else: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if encoder_embeds is None: embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) else: embedding_output = encoder_embeds encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) class BlipTextLMHeadModel(BlipTextPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BlipTextModel(config, add_pooling_layer=False) self.cls = BlipTextOnlyMLMHead(config) self.label_smoothing = config.label_smoothing def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, 
encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.Tensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, return_logits: Optional[bool]=False, is_decoder: Optional[bool]=True, reduction: Optional[str]='mean') -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, is_decoder=is_decoder) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) if return_logits: return prediction_scores[:, :-1, :].contiguous() lm_loss = None if labels is not None: shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous().to(shifted_prediction_scores.device) loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=self.label_smoothing) lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if reduction == 'none': lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) if not return_dict: output = (prediction_scores,) + outputs[2:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'encoder_hidden_states': model_kwargs.get('encoder_hidden_states', None), 'encoder_attention_mask': model_kwargs.get('encoder_attention_mask', None), 'is_decoder': True} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/blip/modeling_tf_blip.py """""" from __future__ import annotations import warnings from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import tensorflow as tf from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling from ...modeling_tf_utils import TFPreTrainedModel, get_initializer, get_tf_activation, keras, keras_serializable, shape_list, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, stable_softmax from ...utils import ModelOutput, 
add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig from .modeling_tf_blip_text import BLIP_TEXT_INPUTS_DOCSTRING, TFBlipTextLMHeadModel, TFBlipTextModel logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'Salesforce/blip-vqa-base' def contrastive_loss(logits: tf.Tensor) -> tf.Tensor: return tf.math.reduce_mean(keras.metrics.sparse_categorical_crossentropy(y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True)) def blip_loss(similarity: tf.Tensor) -> tf.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(tf.transpose(similarity)) return (caption_loss + image_loss) / 2.0 @dataclass class TFBlipForConditionalGenerationModelOutput(ModelOutput): loss: Tuple[tf.Tensor] | None = None logits: Tuple[tf.Tensor] | None = None image_embeds: tf.Tensor | None = None last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None @property def decoder_logits(self): warnings.warn('`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers. Please use the `logits` attribute to retrieve the final output instead.', FutureWarning) return self.logits @dataclass class TFBlipTextVisionModelOutput(ModelOutput): loss: tf.Tensor | None = None image_embeds: tf.Tensor | None = None last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFBlipImageTextMatchingModelOutput(ModelOutput): itm_score: tf.Tensor | None = None loss: tf.Tensor | None = None image_embeds: tf.Tensor | None = None last_hidden_state: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None vision_pooler_output: tf.Tensor | None = None attentions: Tuple[tf.Tensor, ...] 
| None = None question_embeds: Tuple[tf.Tensor] | None = None @dataclass class TFBlipOutput(ModelOutput): loss: tf.Tensor | None = None logits_per_image: tf.Tensor = None logits_per_text: tf.Tensor = None text_embeds: tf.Tensor = None image_embeds: tf.Tensor = None text_model_output: TFBaseModelOutputWithPooling = None vision_model_output: TFBaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) class TFBlipVisionEmbeddings(keras.layers.Layer): def __init__(self, config: BlipVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = keras.layers.Conv2D(filters=self.embed_dim, kernel_size=self.patch_size, strides=self.patch_size, kernel_initializer=get_initializer(self.config.initializer_range), data_format='channels_last', name='patch_embedding') self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 def build(self, input_shape=None): self.class_embedding = self.add_weight(shape=(1, 1, self.embed_dim), initializer=get_initializer(self.config.initializer_range), trainable=True, name='class_embedding') self.position_embedding = self.add_weight(shape=(1, self.num_positions, self.embed_dim), initializer=get_initializer(self.config.initializer_range), trainable=True, name='position_embedding') if self.built: return self.built = True if getattr(self, 'patch_embedding', None) is not None: with tf.name_scope(self.patch_embedding.name): self.patch_embedding.build([None, None, None, 3]) def call(self, pixel_values: tf.Tensor) -> tf.Tensor: batch_size = tf.shape(pixel_values)[0] pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) patch_embeds = self.patch_embedding(pixel_values) patch_embeds = tf.reshape(patch_embeds, (batch_size, self.num_patches, -1)) class_embeds = tf.broadcast_to(self.class_embedding, (batch_size, 1, self.embed_dim)) embeddings = tf.concat([class_embeds, patch_embeds], axis=1) embeddings = embeddings + self.position_embedding[:, :tf.shape(embeddings)[1], :] return embeddings class TFBlipTextEmbeddings(keras.layers.Layer): def __init__(self, config: BlipTextConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.config = config def build(self, input_shape: tf.TensorShape=None): with tf.name_scope('token_embedding'): self.weight = self.add_weight(shape=(self.config.vocab_size, self.embed_dim), initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), trainable=True, name='weight') with tf.name_scope('position_embedding'): self.position_embedding = self.add_weight(shape=(self.config.max_position_embeddings, self.embed_dim), initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), trainable=True, name='embeddings') super().build(input_shape) def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None) -> tf.Tensor: if input_ids is None and inputs_embeds is None: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if position_ids is None: position_ids = 
tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embedding, indices=position_ids) position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1)) final_embeddings = inputs_embeds + position_embeds return final_embeddings class TFBlipAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = keras.layers.Dropout(config.attention_dropout, name='dropout') self.qkv = keras.layers.Dense(3 * self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name='qkv') self.projection = keras.layers.Dense(self.embed_dim, kernel_initializer=get_initializer(config.initializer_range), name='projection') def call(self, hidden_states: tf.Tensor, head_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=False, training: Optional[bool]=None) -> Tuple[tf.Tensor, tf.Tensor | None, Tuple[tf.Tensor] | None]: (bsz, tgt_len, embed_dim) = shape_list(hidden_states) mixed_qkv = self.qkv(hidden_states) mixed_qkv = tf.reshape(mixed_qkv, (bsz, tgt_len, 3, self.num_heads, self.head_dim)) mixed_qkv = tf.transpose(mixed_qkv, perm=(2, 0, 3, 1, 4)) (query_states, key_states, value_states) = (mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]) attention_scores = query_states @ tf.transpose(key_states, (0, 1, 3, 2)) attention_scores = attention_scores * self.scale attention_probs = stable_softmax(attention_scores, axis=-1) attention_probs = self.dropout(attention_probs, training=training) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.transpose(attention_probs @ value_states, perm=(0, 2, 1, 3)) new_context_layer_shape = shape_list(context_layer)[:-2] + [self.embed_dim] context_layer = tf.reshape(context_layer, new_context_layer_shape) output = self.projection(context_layer) outputs = (output, attention_probs) if output_attentions else (output, None) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dropout', None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, 'qkv', None) is not None: with tf.name_scope(self.qkv.name): self.qkv.build([None, None, self.embed_dim]) if getattr(self, 'projection', None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, self.embed_dim]) class TFBlipMLP(keras.layers.Layer): def __init__(self, config: BlipConfig, **kwargs): super().__init__(**kwargs) self.activation_fn = get_tf_activation(config.hidden_act) in_proj_std = config.hidden_size ** (-0.5) * (2 * config.num_hidden_layers) ** (-0.5) fc_std = (2 * config.hidden_size) ** (-0.5) self.fc1 = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(fc_std), name='fc1') self.fc2 = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(in_proj_std), name='fc2') self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.fc1(inputs=hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(inputs=hidden_states) return 
hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'fc1', None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.config.hidden_size]) if getattr(self, 'fc2', None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.intermediate_size]) class TFBlipEncoderLayer(keras.layers.Layer): def __init__(self, config: BlipConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.self_attn = TFBlipAttention(config, name='self_attn') self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm1') self.mlp = TFBlipMLP(config, name='mlp') self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm2') def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, output_attentions: Optional[bool]=False, training: Optional[bool]=None) -> Tuple[tf.Tensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, head_mask=attention_mask, output_attentions=output_attentions, training=training) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = hidden_states + residual outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attn', None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, 'layer_norm1', None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, self.embed_dim]) if getattr(self, 'mlp', None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, 'layer_norm2', None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, self.embed_dim]) class TFBlipPreTrainedModel(TFPreTrainedModel): config_class = BlipConfig base_model_prefix = 'blip' _keys_to_ignore_on_load_missing = ['position_ids'] BLIP_START_DOCSTRING = '\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`BlipConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' BLIP_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' BLIP_INPUTS_DOCSTRING = '\n Args:\n input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @keras_serializable class TFBlipEncoder(keras.layers.Layer): config_class = BlipConfig '' def __init__(self, config: BlipConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layers = [TFBlipEncoderLayer(config, name=f'layers_._{i}') for i in range(config.num_hidden_layers)] @unpack_inputs def call(self, inputs_embeds, attention_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions, training=training) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFBlipVisionModel(TFBlipPreTrainedModel): main_input_name = 'pixel_values' config_class = BlipVisionConfig def __init__(self, config: BlipVisionConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.config = config self.embeddings = TFBlipVisionEmbeddings(config, name='embeddings') self.encoder = TFBlipEncoder(config, name='encoder') self.post_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='post_layernorm') self.embed_dim = config.hidden_size def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutputWithPooling(last_hidden_state=output.last_hidden_state, pooler_output=output.pooler_output, hidden_states=hs, attentions=attns) @unpack_inputs @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=BlipVisionConfig) def call(self, pixel_values: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = 
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(tf.expand_dims(pooled_output, 1)) pooled_output = tf.squeeze(pooled_output, 1) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def get_input_embeddings(self): return self.embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'post_layernorm', None) is not None: with tf.name_scope(self.post_layernorm.name): self.post_layernorm.build([None, None, self.embed_dim]) class TFBlipMainLayer(keras.layers.Layer): config_class = BlipConfig def __init__(self, config: BlipConfig, *args, **kwargs): super().__init__(*args, **kwargs) if not isinstance(config.text_config, BlipTextConfig): raise TypeError(f'config.text_config is expected to be of type BlipTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.vision_config, BlipVisionConfig): raise TypeError(f'config.vision_config is expected to be of type BlipVisionConfig but is of type {type(config.vision_config)}.') text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = TFBlipTextModel(text_config, name='text_model') self.vision_model = TFBlipVisionModel(vision_config, name='vision_model') self.visual_projection = keras.layers.Dense(self.projection_dim, use_bias=False, kernel_initializer=get_initializer(config.initializer_range), name='visual_projection') self.text_projection = keras.layers.Dense(self.projection_dim, use_bias=False, kernel_initializer=get_initializer(config.initializer_range), name='text_projection') self.config = config def build(self, input_shape=None): self.logit_scale = self.add_weight(name='logit_scale', shape=[], initializer=keras.initializers.Constant(self.config.logit_scale_init_value), trainable=True) if self.built: return self.built = True if getattr(self, 'text_model', None) is not None: with tf.name_scope(self.text_model.name): self.text_model.build(None) if getattr(self, 'vision_model', None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, 'visual_projection', None) is not None: with tf.name_scope(self.visual_projection.name): self.visual_projection.build([None, None, self.vision_embed_dim]) if getattr(self, 'text_projection', None) is not None: with tf.name_scope(self.text_projection.name): self.text_projection.build([None, 
None, self.text_embed_dim]) @unpack_inputs def call(self, input_ids: tf.Tensor | None=None, pixel_values: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBlipOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) image_embeds = image_embeds / tf.norm(image_embeds, ord=2, axis=-1, keepdims=True) text_embeds = text_embeds / tf.norm(text_embeds, ord=2, axis=-1, keepdims=True) logit_scale = tf.exp(self.logit_scale) logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale logits_per_image = tf.transpose(logits_per_text) loss = None if return_loss: loss = blip_loss(logits_per_text) loss = tf.reshape(loss, (1,)) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return (loss,) + output if loss is not None else output return TFBlipOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) class TFBlipModel(TFBlipPreTrainedModel): config_class = BlipConfig _keys_to_ignore_on_load_missing = ['text_decoder.cls.predictions.decoder.bias'] main_input_name = 'input_ids' def __init__(self, config: BlipConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.blip = TFBlipMainLayer(config, name='blip') def serving_output(self, output: TFBlipOutput) -> TFBlipOutput: return TFBlipOutput(logits_per_image=output.logits_per_image, logits_per_text=output.logits_per_text, text_embeds=output.text_embeds, image_embeds=output.image_embeds) @unpack_inputs @add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBlipOutput, config_class=BlipConfig) def call(self, input_ids: tf.Tensor | None=None, pixel_values: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBlipOutput]: outputs = self.blip(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, return_loss=return_loss, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs 
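# Usage sketch (illustrative, not part of the upstream module): scoring image-text similarity with
# TFBlipModel via the contrastive logits computed above. The checkpoint name and image URL below are
# assumptions for the example only.
# >>> import requests
# >>> import tensorflow as tf
# >>> from PIL import Image
# >>> from transformers import BlipProcessor, TFBlipModel
# >>> processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# >>> model = TFBlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
# >>> image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# >>> inputs = processor(images=image, text=["a photo of two cats", "a photo of a dog"], return_tensors="tf", padding=True)
# >>> outputs = model(**inputs)
# >>> probs = tf.nn.softmax(outputs.logits_per_image, axis=1)  # image-text match probabilities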
@add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING) def get_text_features(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, return_dict: Optional[bool]=None) -> tf.Tensor: return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.blip.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, return_dict=return_dict) pooled_output = text_outputs[1] text_features = self.blip.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) def get_image_features(self, pixel_values: tf.Tensor | None=None, return_dict: Optional[bool]=None) -> tf.Tensor: return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.blip.vision_model(pixel_values=pixel_values, return_dict=return_dict) pooled_output = vision_outputs[1] image_features = self.blip.visual_projection(pooled_output) return image_features def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'blip', None) is not None: with tf.name_scope(self.blip.name): self.blip.build(None) @add_start_docstrings('\n BLIP Model for image captioning. The model consists of a vision encoder and a text decoder. One can optionally pass\n `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt. Otherwise,\n the decoder starts generating the caption from the [BOS] (beginning-of-sequence) token. If no text input is provided,\n the decoder will start with the [BOS] token only.\n ', BLIP_START_DOCSTRING) class TFBlipForConditionalGeneration(TFBlipPreTrainedModel): config_class = BlipConfig _keys_to_ignore_on_load_missing = ['text_decoder.cls.predictions.decoder.bias'] main_input_name = 'pixel_values' def __init__(self, config: BlipConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.vision_model = TFBlipVisionModel(config.vision_config, name='vision_model') self.text_decoder = TFBlipTextLMHeadModel(config.text_config, name='text_decoder') self.decoder_input_ids = config.text_config.bos_token_id self.decoder_pad_token_id = config.text_config.pad_token_id def get_input_embeddings(self) -> keras.layers.Layer: return self.vision_model.embeddings.patch_embedding @unpack_inputs @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBlipForConditionalGenerationModelOutput, config_class=BlipConfig) def call(self, pixel_values: tf.Tensor, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: tf.Tensor | None=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBlipForConditionalGenerationModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) image_embeds = vision_outputs[0] outputs = self.text_decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, labels=labels, return_dict=False, training=training) if not return_dict: outputs = (outputs[0], outputs[1], image_embeds, vision_outputs[0]) + 
vision_outputs[2:] return tuple((output for output in outputs if output is not None)) if labels is not None: loss = outputs[0] logits = outputs[1] else: loss = None logits = outputs[0] if loss is not None and loss.shape.rank == 0: loss = tf.reshape(loss, (1,)) return TFBlipForConditionalGenerationModelOutput(loss=loss, logits=logits, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions) def generate(self, pixel_values: tf.Tensor, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, **generate_kwargs) -> tf.Tensor: batch_size = pixel_values.shape[0] vision_outputs = self.vision_model(pixel_values=pixel_values) image_embeds = vision_outputs[0] image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int32) if isinstance(input_ids, list): input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int32) elif input_ids is None: input_ids = tf.convert_to_tensor([[self.decoder_input_ids, self.config.text_config.eos_token_id]], dtype=tf.int32) input_ids = tf.tile(input_ids, (batch_size, 1)) input_ids = tf.concat([tf.ones((batch_size, 1), dtype=tf.int32) * self.config.text_config.bos_token_id, input_ids[:, 1:]], axis=1) attention_mask = attention_mask[:, :-1] if attention_mask is not None else None outputs = self.text_decoder.generate(input_ids=input_ids[:, :-1], eos_token_id=self.config.text_config.sep_token_id, pad_token_id=self.config.text_config.pad_token_id, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, **generate_kwargs) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'vision_model', None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, 'text_decoder', None) is not None: with tf.name_scope(self.text_decoder.name): self.text_decoder.build(None) @add_start_docstrings('\n BLIP Model for visual question answering. The model consists of a vision encoder, a text encoder as well as a text\n decoder. 
The vision encoder will encode the input image, the text encoder will encode the input question together\n with the encoding of the image, and the text decoder will output the answer to the question.\n ', BLIP_START_DOCSTRING) class TFBlipForQuestionAnswering(TFBlipPreTrainedModel): config_class = BlipConfig _keys_to_ignore_on_load_missing = ['text_decoder.cls.predictions.decoder.bias'] def __init__(self, config: BlipConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.vision_model = TFBlipVisionModel(config.vision_config, name='vision_model') self.text_encoder = TFBlipTextModel(config.text_config, name='text_encoder', add_pooling_layer=False) self.text_decoder = TFBlipTextLMHeadModel(config.text_config, name='text_decoder') self.decoder_pad_token_id = config.text_config.pad_token_id self.decoder_start_token_id = config.text_config.bos_token_id def get_input_embeddings(self) -> keras.layers.Layer: return self.vision_model.embeddings.patch_embedding def _shift_right(self, input_ids): decoder_start_token_id = self.decoder_start_token_id pad_token_id = self.decoder_pad_token_id if decoder_start_token_id is None or pad_token_id is None: raise ValueError('decoder_start_token_id and pad_token_id must be defined!') start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) start_tokens = tf.cast(start_tokens, input_ids.dtype) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) shifted_input_ids = tf.where(shifted_input_ids == -100, tf.cast(tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids.dtype), shifted_input_ids) tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=shifted_input_ids.dtype)) return shifted_input_ids @unpack_inputs @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBlipTextVisionModelOutput, config_class=BlipVisionConfig) def call(self, input_ids: tf.Tensor, pixel_values: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: tf.Tensor | None=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBlipTextVisionModelOutput]: if labels is None and decoder_input_ids is None: raise ValueError('Either `decoder_input_ids` or `labels` should be passed when calling `TFBlipForQuestionAnswering`. 
If you are training the model, make sure that `labels` is passed; if you are using the model for inference, make sure that `decoder_input_ids` is passed or call `generate`') return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) image_embeds = vision_outputs[0] image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int64) question_embeds = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=return_dict, training=training) question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state if labels is not None and decoder_input_ids is None: decoder_input_ids = labels answer_output = self.text_decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=question_embeds, encoder_attention_mask=attention_mask, labels=labels, return_dict=return_dict, training=training) if labels is not None: decoder_loss = tf.reduce_mean(answer_output.loss) if return_dict else tf.reduce_mean(answer_output[0]) else: decoder_loss = None if not return_dict: outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:] return tuple((output for output in outputs if output is not None)) return TFBlipTextVisionModelOutput(loss=decoder_loss, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions) def generate(self, input_ids: tf.Tensor, pixel_values: tf.Tensor, attention_mask: tf.Tensor | None=None, **generate_kwargs) -> tf.Tensor: vision_outputs = self.vision_model(pixel_values=pixel_values) image_embeds = vision_outputs[0] image_attention_mask = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int32) if isinstance(input_ids, list): input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int32) question_outputs = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=False) question_embeds = question_outputs[0] question_attention_mask = tf.ones(shape_list(question_embeds)[:-1], dtype=tf.int32) bos_ids = tf.fill((tf.shape(question_embeds)[0], 1), value=tf.cast(self.decoder_start_token_id, input_ids.dtype)) outputs = self.text_decoder.generate(input_ids=bos_ids, eos_token_id=self.config.text_config.sep_token_id, pad_token_id=self.config.text_config.pad_token_id, encoder_hidden_states=question_embeds, encoder_attention_mask=question_attention_mask, **generate_kwargs) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'vision_model', None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, 'text_encoder', None) is not None: with tf.name_scope(self.text_encoder.name): self.text_encoder.build(None) if getattr(self, 'text_decoder', None) is not None: with tf.name_scope(self.text_decoder.name): self.text_decoder.build(None) @add_start_docstrings('\n BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context of\n image-text retrieval. 
Given an image and a text, the model returns the probability of the text being relevant to\n the image.\n ', BLIP_START_DOCSTRING) class TFBlipForImageTextRetrieval(TFBlipPreTrainedModel): config_class = BlipConfig def __init__(self, config: BlipConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.vision_model = TFBlipVisionModel(config.vision_config, name='vision_model') self.text_encoder = TFBlipTextModel(config.text_config, name='text_encoder', add_pooling_layer=False) self.vision_proj = keras.layers.Dense(config.image_text_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='vision_proj') self.text_proj = keras.layers.Dense(config.image_text_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='text_proj') self.itm_head = keras.layers.Dense(2, kernel_initializer=get_initializer(config.initializer_range), name='itm_head') self.decoder_pad_token_id = config.text_config.pad_token_id if not hasattr(config, 'decoder_pad_token_id') else config.decoder_pad_token_id self.decoder_start_token_id = config.text_config.bos_token_id if not hasattr(config, 'decoder_start_token_id') else config.decoder_start_token_id self.config = config def get_input_embeddings(self) -> keras.layers.Layer: return self.vision_model.embeddings.patch_embedding @unpack_inputs @add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBlipImageTextMatchingModelOutput, config_class=BlipVisionConfig) def call(self, input_ids: tf.Tensor, pixel_values: tf.Tensor | None=None, use_itm_head: Optional[bool]=True, attention_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBlipImageTextMatchingModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) image_embeds = vision_outputs[0] image_atts = tf.ones(shape_list(image_embeds)[:-1], dtype=tf.int64) itm_question_embeds = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=return_dict, training=training) itm_question_embeds = itm_question_embeds[0] if not return_dict else itm_question_embeds.last_hidden_state itm_output = self.itm_head(itm_question_embeds[:, 0, :]) no_itm_question_embeds = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask, return_dict=return_dict, training=training) no_itm_question_embeds = no_itm_question_embeds[0] if not return_dict else no_itm_question_embeds.last_hidden_state (image_feat, _) = tf.linalg.normalize(self.vision_proj(image_embeds[:, 0, :]), ord=2, axis=-1) (text_feat, _) = tf.linalg.normalize(self.text_proj(no_itm_question_embeds[:, 0, :]), ord=2, axis=-1) no_itm_output = tf.matmul(image_feat, text_feat, transpose_b=True) if use_itm_head: output = itm_output question_embeds = itm_question_embeds else: output = no_itm_output question_embeds = no_itm_question_embeds if not return_dict: outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,) return tuple((output for output in outputs if output is not None)) return TFBlipImageTextMatchingModelOutput(itm_score=output, last_hidden_state=vision_outputs.last_hidden_state, 
hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions, question_embeds=question_embeds) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'vision_model', None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, 'text_encoder', None) is not None: with tf.name_scope(self.text_encoder.name): self.text_encoder.build(None) if getattr(self, 'vision_proj', None) is not None: with tf.name_scope(self.vision_proj.name): self.vision_proj.build([None, None, self.config.vision_config.hidden_size]) if getattr(self, 'text_proj', None) is not None: with tf.name_scope(self.text_proj.name): self.text_proj.build([None, None, self.config.text_config.hidden_size]) if getattr(self, 'itm_head', None) is not None: with tf.name_scope(self.itm_head.name): self.itm_head.build([None, None, self.config.text_config.hidden_size]) # File: transformers-main/src/transformers/models/blip/modeling_tf_blip_text.py from __future__ import annotations import math from typing import Optional, Tuple import tensorflow as tf from ...modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPoolingAndCrossAttentions, TFCausalLMOutputWithCrossAttentions from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, get_initializer, get_tf_activation, keras, keras_serializable, shape_list, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, invert_attention_mask, stable_softmax from ...utils import add_start_docstrings_to_model_forward, logging from .configuration_blip import BlipTextConfig logger = logging.get_logger(__name__) BLIP_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoProcessor`]. See [`BlipProcessor.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class TFBlipTextEmbeddings(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.word_embeddings = keras.layers.Embedding(config.vocab_size, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name='word_embeddings') self.position_embeddings = keras.layers.Embedding(config.max_position_embeddings, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name='position_embeddings') self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name='dropout') self.position_ids = tf.expand_dims(tf.range(config.max_position_embeddings), 0) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.config = config def call(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0, training=None): if input_ids is not None: input_shape = tf.shape(input_ids) else: input_shape = tf.shape(inputs_embeds)[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.word_embeddings(input_ids) embeddings = inputs_embeds if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings, training=training) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'word_embeddings', None) is not None: with tf.name_scope(self.word_embeddings.name): self.word_embeddings.build(None) if getattr(self, 'position_embeddings', None) is not None: with tf.name_scope(self.position_embeddings.name): self.position_embeddings.build(None) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, 'dropout', None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) class TFBlipTextSelfAttention(keras.layers.Layer): def __init__(self, config, is_cross_attention, **kwargs): super().__init__(**kwargs) self.config = config if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='query') self.key = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='key') self.value = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='value') self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 
'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = keras.layers.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_cross_attention = is_cross_attention def transpose_for_scores(self, x): new_x_shape = tf.concat([tf.shape(x)[:-1], tf.constant([self.num_attention_heads, self.attention_head_size], dtype=tf.int32)], axis=0) x = tf.reshape(x, new_x_shape) return tf.transpose(x, perm=(0, 2, 1, 3)) def call(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, training=None): mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = shape_list(hidden_states)[1] position_ids_l = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 1) position_ids_r = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 0) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = tf.cast(positional_embedding, query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = tf.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = tf.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = tf.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + tf.cast(attention_mask, attention_scores.dtype) attention_probs = stable_softmax(attention_scores, axis=-1) attention_probs_dropped = self.dropout(attention_probs, training=training) if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = attention_probs_dropped @ value_layer context_layer = tf.transpose(context_layer, perm=(0, 2, 1, 3)) new_context_layer_shape = shape_list(context_layer)[:-2] + [self.all_head_size] context_layer = tf.reshape(context_layer, new_context_layer_shape) outputs = (context_layer, attention_probs) if 
output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'query', None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if self.is_cross_attention: if getattr(self, 'key', None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.encoder_hidden_size]) if getattr(self, 'value', None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.encoder_hidden_size]) else: if getattr(self, 'key', None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, 'value', None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) class TFBlipTextSelfOutput(keras.layers.Layer): def __init__(self, config: BlipTextConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: Optional[bool]=None) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFBlipTextAttention(keras.layers.Layer): def __init__(self, config, is_cross_attention=False, **kwargs): super().__init__(**kwargs) self.self = TFBlipTextSelfAttention(config, is_cross_attention, name='self') self.self_output = TFBlipTextSelfOutput(config, name='output') def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, encoder_hidden_states: tf.Tensor | None=None, encoder_attention_mask: tf.Tensor | None=None, past_key_value: Tuple[Tuple[tf.Tensor]] | None=None, output_attentions: Optional[bool]=False, training: Optional[bool]=None): self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, training=training) attention_output = self.self_output(self_outputs[0], hidden_states, training=training) outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self', None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, 'self_output', None) is not None: with tf.name_scope(self.self_output.name): self.self_output.build(None) class TFBlipTextIntermediate(keras.layers.Layer): def __init__(self, config: BlipTextConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') if 
isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFBlipTextOutput(keras.layers.Layer): def __init__(self, config: BlipTextConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFBlipTextLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.attention = TFBlipTextAttention(config, name='attention') if self.config.is_decoder: self.crossattention = TFBlipTextAttention(config, is_cross_attention=self.config.is_decoder, name='crossattention') self.intermediate = TFBlipTextIntermediate(config, name='intermediate') self.self_output = TFBlipTextOutput(config, name='output') def call(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, training=None): self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, training=training) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions, training=training) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] intermediate_output = self.intermediate(attention_output) layer_output = self.self_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'attention', None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, 'intermediate', None) is not None: 
with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, 'self_output', None) is not None: with tf.name_scope(self.self_output.name): self.self_output.build(None) if getattr(self, 'crossattention', None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None) @keras_serializable class TFBlipTextEncoder(keras.layers.Layer): config_class = BlipTextConfig def __init__(self, config, name=None, **kwargs): super().__init__(name=name, **kwargs) self.config = config self.layer = [TFBlipTextLayer(config, name=f'layer_._{i}') for i in range(config.num_hidden_layers)] @unpack_inputs def call(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, training=None): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.is_decoder else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, training=training) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return TFBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layer', None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) class TFBlipTextPooler(keras.layers.Layer): def __init__(self, config: BlipTextConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation='tanh', name='dense') self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFBlipTextPredictionHeadTransform(keras.layers.Layer): def __init__(self, config: BlipTextConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') if isinstance(config.hidden_act, str): self.transform_act_fn = 
get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFBlipTextLMPredictionHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.transform = TFBlipTextPredictionHeadTransform(config, name='transform') self.decoder = keras.layers.Dense(config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name='decoder', use_bias=False) self.config = config def build(self, input_shape=None): self.bias = self.add_weight(name='bias', shape=(self.config.vocab_size,), initializer='zeros', trainable=True) if self.built: return self.built = True if getattr(self, 'transform', None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) if getattr(self, 'decoder', None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build([None, None, self.config.hidden_size]) def call(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class TFBlipTextOnlyMLMHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.predictions = TFBlipTextLMPredictionHead(config, name='predictions') def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'predictions', None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) class TFBlipTextPreTrainedModel(TFPreTrainedModel): config_class = BlipTextConfig base_model_prefix = 'bert' _keys_to_ignore_on_load_missing = ['position_ids'] class TFBlipTextModel(TFBlipTextPreTrainedModel): def __init__(self, config, add_pooling_layer=True, name=None, **kwargs): super().__init__(config, name=name, **kwargs) self.config = config self.embeddings = TFBlipTextEmbeddings(config, name='embeddings') self.encoder = TFBlipTextEncoder(config, name='encoder') self.pooler = TFBlipTextPooler(config, name='pooler') if add_pooling_layer else None def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @tf.function def get_extended_attention_mask(self, attention_mask: tf.Tensor, input_shape: Tuple[int], is_decoder: bool) -> tf.Tensor: if not isinstance(attention_mask, tf.Tensor): attention_mask = tf.convert_to_tensor(attention_mask) if attention_mask.shape.rank == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.shape.rank == 2: if is_decoder: (batch_size, seq_length) = input_shape seq_ids = tf.range(seq_length, dtype=attention_mask.dtype) causal_mask = tf.broadcast_to(seq_ids, (batch_size, seq_length, seq_length)) <= 
seq_ids[None, :, None] if shape_list(causal_mask)[1] < shape_list(attention_mask)[1]: prefix_seq_len = tf.shape(attention_mask)[1] - tf.shape(causal_mask)[1] causal_mask = tf.concat([tf.ones((batch_size, seq_length, prefix_seq_len), dtype=causal_mask.dtype), causal_mask], axis=-1) extended_attention_mask = tf.cast(causal_mask[:, None, :, :], attention_mask.dtype) * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError('Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(input_shape, attention_mask.shape)) extended_attention_mask = tf.cast(extended_attention_mask, self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING) @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, encoder_embeds: tf.Tensor | None=None, encoder_hidden_states: tf.Tensor | None=None, encoder_attention_mask: tf.Tensor | None=None, past_key_values: Tuple[Tuple[tf.Tensor]] | None=None, use_cache: bool | None=None, output_attentions: bool | None=None, output_hidden_states: bool | None=None, return_dict: bool | None=None, is_decoder: bool=False, training: bool=False) -> Tuple[tf.Tensor] | TFBaseModelOutputWithPoolingAndCrossAttentions: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) (batch_size, seq_length) = input_shape elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] (batch_size, seq_length) = input_shape elif encoder_embeds is not None: input_shape = shape_list(encoder_embeds)[:-1] (batch_size, seq_length) = input_shape else: raise ValueError('You have to specify either input_ids or inputs_embeds or encoder_embeds') past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = tf.ones((batch_size, seq_length + past_key_values_length)) extended_attention_mask: tf.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, is_decoder) if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): (encoder_batch_size, encoder_sequence_length, _) = shape_list(encoder_hidden_states[0]) else: (encoder_batch_size, encoder_sequence_length, _) = shape_list(encoder_hidden_states) encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = tf.ones(encoder_hidden_shape) encoder_extended_attention_mask = invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = invert_attention_mask(encoder_attention_mask) else: 
encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if encoder_embeds is None: embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) else: embedding_output = encoder_embeds encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'pooler', None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFBlipTextLMHeadModel(TFBlipTextPreTrainedModel): _keys_to_ignore_on_load_unexpected = ['pooler'] _keys_to_ignore_on_load_missing = ['position_ids', 'predictions.decoder.bias'] def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.bert = TFBlipTextModel(config, add_pooling_layer=False, name='bert') self.cls = TFBlipTextOnlyMLMHead(config, name='cls') self.label_smoothing = config.label_smoothing def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING) @unpack_inputs def call(self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, return_logits=False, is_decoder=True, training=None): return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, is_decoder=is_decoder, training=training) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) if return_logits: return prediction_scores[:, :-1, :] lm_loss = None if labels is not None: shifted_prediction_scores = prediction_scores[:, :-1, :] shifted_prediction_scores = tf.reshape(shifted_prediction_scores, (-1, self.config.vocab_size)) labels = labels[:, 
1:] labels = tf.reshape(labels, (-1,)) one_hot_labels = tf.one_hot(tf.nn.relu(labels), depth=self.config.vocab_size, dtype=tf.float32) loss_fct = keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=self.label_smoothing, reduction='none') masked_positions = tf.cast(tf.not_equal(labels, -100), dtype=tf.float32) lm_loss = loss_fct(one_hot_labels, shifted_prediction_scores) lm_loss *= masked_positions lm_loss = tf.reduce_sum(lm_loss, axis=0) / tf.math.count_nonzero(masked_positions, dtype=tf.float32) if not return_dict: output = (prediction_scores,) + outputs[2:] return (lm_loss,) + output if lm_loss is not None else output return TFCausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = tf.ones(input_shape, dtype=input_ids.dtype) if past_key_values is not None: input_ids = input_ids[:, -1:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'encoder_hidden_states': model_kwargs.get('encoder_hidden_states', None), 'encoder_attention_mask': model_kwargs.get('encoder_attention_mask', None), 'is_decoder': True} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past)),) return reordered_past def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'bert', None) is not None: with tf.name_scope(self.bert.name): self.bert.build(None) if getattr(self, 'cls', None) is not None: with tf.name_scope(self.cls.name): self.cls.build(None) # File: transformers-main/src/transformers/models/blip/processing_blip.py """""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class BlipProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] valid_kwargs = [] image_processor_class = 'BlipImageProcessor' tokenizer_class = ('BertTokenizer', 'BertTokenizerFast') def __init__(self, image_processor, tokenizer, **kwargs): tokenizer.return_token_type_ids = False super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_token_type_ids: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.') if images is None: self.current_processor = self.tokenizer text_encoding = 
self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs) return text_encoding encoding_image_processor = self.image_processor(images, return_tensors=return_tensors) if text is not None: text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs) else: text_encoding = None if text_encoding is not None: encoding_image_processor.update(text_encoding) return encoding_image_processor def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) # File: transformers-main/src/transformers/models/blip_2/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_blip_2': ['Blip2Config', 'Blip2QFormerConfig', 'Blip2VisionConfig'], 'processing_blip_2': ['Blip2Processor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_blip_2'] = ['Blip2Model', 'Blip2VisionModelWithProjection', 'Blip2QFormerModel', 'Blip2PreTrainedModel', 'Blip2ForConditionalGeneration', 'Blip2ForImageTextRetrieval', 'Blip2VisionModel', 'Blip2TextModelWithProjection'] if TYPE_CHECKING: from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig from .processing_blip_2 import Blip2Processor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_2 import Blip2ForConditionalGeneration, Blip2ForImageTextRetrieval, Blip2Model, Blip2PreTrainedModel, Blip2QFormerModel, Blip2TextModelWithProjection, Blip2VisionModel, Blip2VisionModelWithProjection else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/blip_2/configuration_blip_2.py """""" import os from typing import Optional, Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class Blip2VisionConfig(PretrainedConfig): model_type = 'blip_2_vision_model' def __init__(self, hidden_size=1408, intermediate_size=6144, 
num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.qkv_bias = qkv_bias @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'blip-2': config_dict = config_dict['vision_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class Blip2QFormerConfig(PretrainedConfig): model_type = 'blip_2_qformer' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, use_qformer_text_input=False, **kwargs): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.cross_attention_frequency = cross_attention_frequency self.encoder_hidden_size = encoder_hidden_size self.use_qformer_text_input = use_qformer_text_input @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'blip-2': config_dict = config_dict['qformer_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class Blip2Config(PretrainedConfig): model_type = 'blip-2' def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, image_text_hidden_size=256, image_token_index=None, **kwargs): super().__init__(**kwargs) if vision_config is None: vision_config = {} logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.') if qformer_config is None: qformer_config = {} logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.') if text_config is None: text_config = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).') self.vision_config = Blip2VisionConfig(**vision_config) self.qformer_config = Blip2QFormerConfig(**qformer_config) text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt' self.text_config = CONFIG_MAPPING[text_model_type](**text_config) self.tie_word_embeddings = self.text_config.tie_word_embeddings self.is_encoder_decoder = self.text_config.is_encoder_decoder self.num_query_tokens = num_query_tokens self.image_text_hidden_size = image_text_hidden_size self.image_token_index = image_token_index self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES self.initializer_factor = 1.0 self.initializer_range = 0.02 @classmethod def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: Optional[PretrainedConfig]=None, **kwargs): return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict() if text_config is not None else None, **kwargs) # File: transformers-main/src/transformers/models/blip_2/convert_blip_2_original_to_pytorch.py """""" import argparse import requests import torch from lavis.models import load_model_and_preprocess from PIL import Image from transformers import AutoTokenizer, BertTokenizer, Blip2Config, Blip2ForConditionalGeneration, Blip2ForImageTextRetrieval, Blip2Processor, Blip2QFormerConfig, Blip2VisionConfig, BlipImageProcessor, OPTConfig, T5Config, set_seed from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def load_demo_image(): url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png' image = Image.open(requests.get(url, stream=True).raw).convert('RGB') return image def create_rename_keys(config, model_name): rename_keys = [] rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding')) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding')) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight')) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias')) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight')) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias')) for i in range(config.vision_config.num_hidden_layers): rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', 
f'vision_model.encoder.layers.{i}.layer_norm1.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias')) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight')) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias')) if 'itm' in model_name: rename_keys.append(('Qformer.bert.embeddings.word_embeddings.weight', 'embeddings.word_embeddings.weight')) rename_keys.append(('Qformer.bert.embeddings.position_embeddings.weight', 'embeddings.position_embeddings.weight')) rename_keys.append(('vision_proj.weight', 'vision_projection.weight')) rename_keys.append(('vision_proj.bias', 'vision_projection.bias')) rename_keys.append(('text_proj.weight', 'text_projection.weight')) rename_keys.append(('text_proj.bias', 'text_projection.bias')) return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def read_in_q_v_bias(state_dict, config): for i in range(config.vision_config.num_hidden_layers): q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias') v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias') qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)) state_dict[f'vision_model.encoder.layers.{i}.self_attn.qkv.bias'] = qkv_bias def get_blip2_config(model_name, eos_token_id): image_size = 364 if 'coco' in model_name else 224 vision_config = Blip2VisionConfig(image_size=image_size).to_dict() if 'opt-2.7b' in model_name: text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict() elif 'opt-6.7b' in model_name: text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict() elif 't5-xl' in model_name: text_config = T5Config.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict() elif 't5-xxl' in model_name: text_config = T5Config.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict() elif 'itm' in model_name: text_config = {} else: raise ValueError('Model name not supported') if 'itm' in model_name: config = Blip2Config(vision_config=vision_config, qformer_config=Blip2QFormerConfig(vocab_size=30523, use_qformer_text_input=True).to_dict()) else: config = Blip2Config(vision_config=vision_config, text_config=text_config) return (config, image_size) @torch.no_grad() def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False, lavis_device='cpu', hf_model_device='cpu'): if 'opt' in model_name: tokenizer 
= AutoTokenizer.from_pretrained('facebook/opt-2.7b') elif 'itm' in model_name: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', truncation_side='right') tokenizer.add_special_tokens({'bos_token': '[DEC]'}) else: tokenizer = AutoTokenizer.from_pretrained('google/flan-t5-xl') if 'itm' in model_name: eos_token_id = None else: eos_token_id = tokenizer('\n', add_special_tokens=False).input_ids[0] (config, image_size) = get_blip2_config(model_name, eos_token_id=eos_token_id) if 'itm' in model_name: hf_model = Blip2ForImageTextRetrieval(config).eval() else: hf_model = Blip2ForConditionalGeneration(config).eval() model_name_to_original = {'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'), 'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'), 'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'), 'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'), 'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'), 'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'), 'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'), 'blip2-itm-vit-g': ('blip2_image_text_matching', 'pretrain'), 'blip2-itm-vit-g-coco': ('blip2_image_text_matching', 'coco')} (name, type) = model_name_to_original[model_name] print('Loading original model...') (original_model, vis_processors, _) = load_model_and_preprocess(name=name, model_type=type, is_eval=True, device=lavis_device) original_model.eval() print('Done!') state_dict = original_model.state_dict() rename_keys = create_rename_keys(config, model_name) for (src, dest) in rename_keys: rename_key(state_dict, src, dest) for (key, val) in state_dict.copy().items(): val = state_dict.pop(key) if key.startswith('Qformer.bert'): key = key.replace('Qformer.bert', 'qformer') if 'attention.self' in key: key = key.replace('self', 'attention') if 'opt_proj' in key: key = key.replace('opt_proj', 'language_projection') if 't5_proj' in key: key = key.replace('t5_proj', 'language_projection') if key.startswith('opt'): key = key.replace('opt', 'language') if key.startswith('t5'): key = key.replace('t5', 'language') state_dict[key] = val read_in_q_v_bias(state_dict, config) (missing_keys, unexpected_keys) = hf_model.load_state_dict(state_dict, strict=False) assert len(missing_keys) == 0 if 'itm' in model_name: unexpected_keys = list(filter(lambda x: not x.startswith('Qformer.cls'), unexpected_keys)) assert unexpected_keys == ['temp', 'qformer.embeddings.position_ids'] else: assert unexpected_keys == ['qformer.embeddings.position_ids'] image = load_demo_image() original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(lavis_device) image_processor = BlipImageProcessor(size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD) processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer) pixel_values = processor(images=image, return_tensors='pt').pixel_values.to(hf_model_device) assert torch.allclose(pixel_values, original_pixel_values.to(pixel_values.device)) original_model.to(lavis_device) hf_model.to(hf_model_device) if 'itm' in model_name: caption = 'a large fountain spewing water into the air' input_ids = tokenizer([caption], return_tensors='pt').input_ids.to(hf_model_device) attention_mask = processor(text=caption, return_tensors='pt').attention_mask.to(hf_model_device) with torch.no_grad(): original_logits = original_model({'image': original_pixel_values, 'text_input': [caption]}, match_head='itm') logits = hf_model(pixel_values=original_pixel_values, input_ids=input_ids, 
attention_mask=attention_mask, use_image_text_matching_head=True) assert original_logits.shape == logits.logits_per_image.shape print('First values of original logits:', original_logits[0, :3]) print('First values of HF logits:', logits.logits_per_image[0, :3]) target_dtype = logits.logits_per_image.dtype assert torch.allclose(original_logits.to(target_dtype), logits.logits_per_image, atol=0.0001) original_itm_scores = torch.nn.functional.softmax(original_logits, dim=1) itm_scores = torch.nn.functional.softmax(logits.logits_per_image, dim=1) assert torch.allclose(original_itm_scores.to(target_dtype), itm_scores, atol=0.0001) print('Looks ok!') with torch.no_grad(): original_logits = original_model({'image': original_pixel_values, 'text_input': [caption]}, match_head='itc') logits = hf_model(pixel_values=original_pixel_values, input_ids=input_ids, attention_mask=attention_mask, use_image_text_matching_head=False) assert original_logits.shape == logits.logits_per_image.shape print('First values of original logits:', original_logits[0, :3]) print('First values of HF logits:', logits.logits_per_image[0, :3]) target_dtype = logits.logits_per_image.dtype assert torch.allclose(original_logits.to(target_dtype), logits.logits_per_image, atol=0.0001) print('Looks ok!') else: input_ids = tokenizer(['\n'], return_tensors='pt').input_ids.to(hf_model_device) with torch.no_grad(): if 'opt' in model_name: original_logits = original_model({'image': original_pixel_values, 'text_input': ['']}).logits logits = hf_model(pixel_values, input_ids).logits else: original_logits = original_model({'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}).logits labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100) logits = hf_model(pixel_values, input_ids, labels=labels).logits assert original_logits.shape == logits.shape print('First values of original logits:', original_logits[0, :3, :3]) print('First values of HF logits:', logits[0, :3, :3]) assert torch.allclose(original_logits.to(logits.device), logits, atol=0.0001) print('Looks ok!') print('Generating a caption...') prompt = 'Question: what object is in this image? 
Answer:' input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(hf_model_device) set_seed(42) original_outputs = original_model.generate({'image': original_pixel_values, 'prompt': prompt}, use_nucleus_sampling=True, max_length=50) outputs = hf_model.generate(pixel_values, input_ids, do_sample=True, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1) output_text = processor.batch_decode(outputs, skip_special_tokens=True) output_text = [text.strip() for text in output_text] print('Original generation:', original_outputs) print('HF generation:', output_text) if pytorch_dump_folder_path is not None: processor.save_pretrained(pytorch_dump_folder_path) hf_model.save_pretrained(pytorch_dump_folder_path) if push_to_hub: processor.push_to_hub(f'nielsr/{model_name}') hf_model.push_to_hub(f'nielsr/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() choices = ['blip2-opt-2.7b', 'blip2-opt-6.7b', 'blip2-opt-2.7b-coco', 'blip2-opt-6.7b-coco', 'blip2-flan-t5-xl', 'blip2-flan-t5-xl-coco', 'blip2-flan-t5-xxl', 'blip2-itm-vit-g', 'blip2-itm-vit-g-coco'] parser.add_argument('--model_name', default='blip2-opt-2.7b', choices=choices, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting') parser.add_argument('--lavis_device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.') parser.add_argument('--hf_model_device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.') args = parser.parse_args() convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.lavis_device, args.hf_model_device) # File: transformers-main/src/transformers/models/blip_2/modeling_blip_2.py """""" import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'Salesforce/blip2-opt-2.7b' @dataclass class Blip2ForConditionalGenerationModelOutput(ModelOutput): loss: Optional[Tuple[torch.FloatTensor]] = None logits: Optional[Tuple[torch.FloatTensor]] = None vision_outputs: Optional[torch.FloatTensor] = None qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys())) @dataclass class 
Blip2ImageTextMatchingModelOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) @dataclass class Blip2TextModelOutput(ModelOutput): text_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class Blip2VisionModelOutput(ModelOutput): image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None class Blip2VisionEmbeddings(nn.Module): def __init__(self, config: Blip2VisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: (batch_size, _, height, width) = pixel_values.shape target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: position_embedding = self.interpolate_pos_encoding(embeddings, height, width) else: position_embedding = self.position_embedding embeddings = embeddings + position_embedding[:, :embeddings.size(1), :].to(target_dtype) return embeddings class Blip2Attention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads 
= config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = nn.Dropout(config.attention_dropout) self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False) if config.qkv_bias: q_bias = nn.Parameter(torch.zeros(self.embed_dim)) v_bias = nn.Parameter(torch.zeros(self.embed_dim)) else: q_bias = None v_bias = None if q_bias is not None: qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)) self.qkv.bias = nn.Parameter(qkv_bias) self.projection = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, tgt_len, embed_dim) = hidden_states.size() mixed_qkv = self.qkv(hidden_states) mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(2, 0, 3, 1, 4) (query_states, key_states, value_states) = (mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]) attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) attention_scores = attention_scores * self.scale attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3) new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,) context_layer = context_layer.reshape(new_context_layer_shape) output = self.projection(context_layer) outputs = (output, attention_probs) if output_attentions else (output, None) return outputs class Blip2MLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class Blip2EncoderLayer(nn.Module): def __init__(self, config: Blip2Config): super().__init__() self.embed_dim = config.hidden_size self.self_attn = Blip2Attention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = Blip2MLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, head_mask=attention_mask, output_attentions=output_attentions) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = hidden_states + residual outputs = (hidden_states,) if output_attentions: outputs += 
(attn_weights,) return outputs class Blip2PreTrainedModel(PreTrainedModel): config_class = Blip2Config base_model_prefix = 'blip' supports_gradient_checkpointing = True _no_split_modules = ['Blip2Attention', 'Blip2QFormerMultiHeadAttention', 'Blip2TextEmbeddings', 'T5Block', 'OPTDecoderLayer'] _skip_keys_device_placement = 'past_key_values' _keep_in_fp32_modules = ['wo'] def _init_weights(self, module): factor = self.config.initializer_range if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=factor) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() if isinstance(module, Blip2VisionEmbeddings): if hasattr(self.config, 'vision_config') and (not isinstance(self.config, Blip2VisionConfig)): factor = self.config.vision_config.initializer_range nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor) nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BLIP_2_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`Blip2Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BLIP_2_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for\n details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n Whether to interpolate the pre-trained position encodings.\n' BLIP_2_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`\n is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).\n\n To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5\n Training](./t5#training).\n decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' BLIP_2_TEXT_WITH_PROJECTION_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' BLIP_2_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for\n details.\n\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of input sequence tokens in the vocabulary of the language model. 
Input tokens can optionally be\n provided to serve as text prompt, which the language model can continue.\n\n Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an\n encoder-decoder language model (like T5) is used.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n Only relevant in case an encoder-decoder language model (like T5) is used.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n Whether to interpolate the pre-trained position encodings.\n' BLIP2_IMAGE_TEXT_RETRIEVAL_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for\n details.\n\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be\n provided to serve as text prompt, which the language model can continue.\n\n Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n use_image_text_matching_head (`bool`, *optional*):\n Whether to return the Image-Text Matching or Contrastive scores.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class Blip2Encoder(nn.Module): def __init__(self, config: Blip2Config): super().__init__() self.config = config self.layers = nn.ModuleList([Blip2EncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class Blip2VisionModel(Blip2PreTrainedModel): main_input_name = 'pixel_values' config_class = Blip2VisionConfig def __init__(self, config: Blip2VisionConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.embeddings = Blip2VisionEmbeddings(config) self.encoder = Blip2Encoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.post_init() @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Blip2VisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] 
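# Pooled output: the first (CLS) token of the layer-normalized sequence is taken as the image
# representation and passed through post_layernorm once more before being returned as `pooler_output`.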
pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def get_input_embeddings(self): return self.embeddings class Blip2QFormerMultiHeadAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size) self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.save_attention = False def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False): is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) mixed_query_layer = self.query(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 
1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) if is_cross_attention and self.save_attention: self.save_attention_map(attention_probs) attention_probs.register_hook(self.save_attn_gradients) attention_probs_dropped = self.dropout(attention_probs) if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs class Blip2QFormerSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class Blip2QFormerAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.attention = Blip2QFormerMultiHeadAttention(config, is_cross_attention) self.output = Blip2QFormerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: 
Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.attention(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class Blip2QFormerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class Blip2QFormerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class Blip2QFormerLayer(nn.Module): def __init__(self, config, layer_idx): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = Blip2QFormerAttention(config) self.layer_idx = layer_idx if layer_idx % config.cross_attention_frequency == 0: self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True) self.has_cross_attention = True else: self.has_cross_attention = False if config.use_qformer_text_input: self.intermediate = Blip2QFormerIntermediate(config) self.output = Blip2QFormerOutput(config) self.intermediate_query = Blip2QFormerIntermediate(config) self.output_query = Blip2QFormerOutput(config) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, query_length=0): self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] if self.has_cross_attention: if encoder_hidden_states is None: raise ValueError('encoder_hidden_states must be given for cross-attention layers') cross_attention_outputs = self.crossattention(query_attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions) query_attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] layer_output = apply_chunking_to_forward(self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output) if attention_output.shape[1] > query_length: layer_output_text = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, 
attention_output[:, query_length:, :]) layer_output = torch.cat([layer_output, layer_output_text], dim=1) else: layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def feed_forward_chunk_query(self, attention_output): intermediate_output = self.intermediate_query(attention_output) layer_output = self.output_query(intermediate_output, attention_output) return layer_output class Blip2QFormerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, query_length=0): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, 'gradient_checkpointing', False) and self.training: if use_cache: logger.warning('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, query_length) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if layer_module.has_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class Blip2TextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') def forward(self, input_ids: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, query_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: if input_ids is not None: seq_length = input_ids.size()[1] else: seq_length = 0 if position_ids is None: position_ids = self.position_ids[:, :seq_length] if input_ids is not None: input_ids = input_ids.to(self.word_embeddings.weight.device) embeddings = self.word_embeddings(input_ids) if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings if query_embeds is not None: embeddings = torch.cat((query_embeds, embeddings), dim=1) else: embeddings = query_embeds return embeddings class Blip2QFormerModel(Blip2PreTrainedModel): def __init__(self, config: Blip2QFormerConfig): super().__init__(config) self.config = config self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.encoder = Blip2QFormerEncoder(config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: Tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor: if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError('Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(input_shape, attention_mask.shape)) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = 
(1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward(self, query_embeds: torch.FloatTensor, query_length: Optional[int]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict past_key_values_length = past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 query_length = query_length if query_length is not None else query_embeds.shape[1] if query_embeds is not None else 0 embedding_output = self.layernorm(query_embeds) embedding_output = self.dropout(embedding_output) input_shape = embedding_output.size()[:-1] (batch_size, seq_length) = input_shape device = embedding_output.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states[0].size() else: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, query_length=query_length) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('\n BLIP-2 Model for generating text and image features. 
The model consists of a vision encoder, Querying Transformer\n (Q-Former) and a language model.\n ', BLIP_2_START_DOCSTRING) class Blip2Model(Blip2PreTrainedModel): config_class = Blip2Config main_input_name = 'pixel_values' def __init__(self, config: Blip2Config): super().__init__(config) self.vision_model = Blip2VisionModel(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.qformer = Blip2QFormerModel(config.qformer_config) self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) if config.use_decoder_only_language_model: language_model = AutoModelForCausalLM.from_config(config.text_config, attn_implementation=config._attn_implementation) else: language_model = AutoModelForSeq2SeqLM.from_config(config.text_config, attn_implementation=config._attn_implementation) if language_model._tied_weights_keys is not None: self._tied_weights_keys = [f'language_model.{k}' for k in language_model._tied_weights_keys] self.language_model = language_model self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.language_model.get_output_embeddings() def get_encoder(self): return self.language_model.get_encoder() def get_decoder(self): return self.language_model.get_decoder() def _tie_weights(self): if not self.config.use_decoder_only_language_model: self.language_model.encoder.embed_tokens = self.language_model.shared self.language_model.decoder.embed_tokens = self.language_model.shared @add_start_docstrings_to_model_forward(BLIP_2_TEXT_INPUTS_DOCSTRING) def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.use_decoder_only_language_model: text_outputs = self.language_model(input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) else: inputs_embeds = self.language_model.get_input_embeddings()(input_ids) text_outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels) return text_outputs @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING) def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False): output_attentions = output_attentions if 
output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) return vision_outputs @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING) def get_qformer_features(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return query_outputs @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Blip2ForConditionalGenerationModelOutput, config_class=Blip2VisionConfig) def forward(self, pixel_values: torch.FloatTensor, input_ids: torch.FloatTensor, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) query_output = query_outputs[0] language_model_inputs = self.language_projection(query_output) language_model_attention_mask = torch.ones(language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device) inputs_embeds = self.language_model.get_input_embeddings()(input_ids) inputs_embeds = 
torch.cat([language_model_inputs, inputs_embeds], dim=1) if attention_mask is None: attention_mask = torch.ones_like(input_ids) expected_device = language_model_attention_mask.device attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(expected_device)], dim=1) if self.config.use_decoder_only_language_model: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: labels = labels.to(logits.device) logits = logits[:, -labels.size(1):, :] shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous().to(logits.device) loss_fct = CrossEntropyLoss(reduction='mean') loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) else: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels) loss = outputs.loss if return_dict else outputs[0] logits = outputs.logits if return_dict else outputs[1] if not return_dict: output = (logits, vision_outputs, query_outputs, outputs) return (loss,) + output if loss is not None else output return Blip2ForConditionalGenerationModelOutput(loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs) @add_start_docstrings('\n BLIP-2 Text Model with a projection layer on top (a linear layer on top of the pooled output).\n ', BLIP_2_START_DOCSTRING) class Blip2TextModelWithProjection(Blip2PreTrainedModel): supports_gradient_checkpointing = False _keep_in_fp32_modules = [] def __init__(self, config: Blip2Config): super().__init__(config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.embeddings = Blip2TextEmbeddings(config.qformer_config) self.qformer = Blip2QFormerModel(config.qformer_config) self.text_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size) self.post_init() @add_start_docstrings_to_model_forward(BLIP_2_TEXT_WITH_PROJECTION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Blip2TextModelOutput, config_class=Blip2Config) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Blip2TextModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict query_embeds = self.embeddings(input_ids=input_ids, position_ids=position_ids) text_outputs = self.qformer(query_embeds=query_embeds, query_length=0, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = text_outputs[0] if not return_dict else text_outputs.last_hidden_state text_embeds = self.text_projection(pooled_output) text_embeds = nn.functional.normalize(text_embeds, dim=-1) if not return_dict: outputs = (text_embeds, text_outputs[0]) + text_outputs[2:] return tuple((output for output in outputs if output is not None)) return 
Blip2TextModelOutput(text_embeds=text_embeds, last_hidden_state=text_outputs.last_hidden_state, hidden_states=text_outputs.hidden_states, attentions=text_outputs.attentions) @add_start_docstrings('\n BLIP-2 Vision Model with a projection layer on top (a linear layer on top of the pooled output).\n ', BLIP_2_START_DOCSTRING) class Blip2VisionModelWithProjection(Blip2PreTrainedModel): main_input_name = 'pixel_values' _keep_in_fp32_modules = [] def __init__(self, config: Blip2Config): super().__init__(config) self.vision_model = Blip2VisionModel(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.qformer = Blip2QFormerModel(config.qformer_config) self.vision_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Blip2VisionModelOutput, config_class=Blip2Config) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Blip2VisionModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = vision_outputs[0] if not return_dict else vision_outputs.last_hidden_state image_attention_mask = torch.ones(pooled_output.size()[:-1], dtype=torch.long, device=pooled_output.device) query_tokens = self.query_tokens.expand(pooled_output.shape[0], -1, -1) query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=pooled_output, encoder_attention_mask=image_attention_mask, return_dict=return_dict) embeds = query_outputs[0] if not return_dict else query_outputs.last_hidden_state image_embeds = self.vision_projection(embeds) image_embeds = nn.functional.normalize(image_embeds, dim=-1) if not return_dict: outputs = (image_embeds, vision_outputs[0]) + vision_outputs[2:] return tuple((output for output in outputs if output is not None)) return Blip2VisionModelOutput(image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions) @add_start_docstrings('\n BLIP-2 Model for generating text given an image and an optional text prompt. The model consists of a vision\n encoder, Querying Transformer (Q-Former) and a language model.\n\n One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue\n the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.\n\n \n\n Note that Flan-T5 checkpoints cannot be cast to float16. 
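# Sketch of how the projected embeddings from Blip2TextModelWithProjection and
# Blip2VisionModelWithProjection above can be compared: both heads L2-normalize their
# outputs, the image side keeps one vector per query token, and (as in
# Blip2ForImageTextRetrieval further below) similarity is a dot product with a maximum
# taken over the query tokens. Shapes and tensors here are illustrative only.
import torch
import torch.nn.functional as F

text_embeds = F.normalize(torch.randn(4, 256), dim=-1)        # (num_texts, image_text_hidden_size)
image_embeds = F.normalize(torch.randn(2, 32, 256), dim=-1)   # (num_images, num_query_tokens, image_text_hidden_size)

sims = torch.einsum("iqd,td->iqt", image_embeds, text_embeds) # similarity per query token
logits_per_image = sims.max(dim=1).values                     # (num_images, num_texts)
print(logits_per_image.shape)                                 # torch.Size([2, 4])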
They are pre-trained using bfloat16.\n\n \n ', BLIP_2_START_DOCSTRING) class Blip2ForConditionalGeneration(Blip2PreTrainedModel): config_class = Blip2Config main_input_name = 'pixel_values' def __init__(self, config: Blip2Config): super().__init__(config) self.vision_model = Blip2VisionModel(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.qformer = Blip2QFormerModel(config.qformer_config) self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) if config.use_decoder_only_language_model: language_model = AutoModelForCausalLM.from_config(config.text_config, attn_implementation=config._attn_implementation) else: language_model = AutoModelForSeq2SeqLM.from_config(config.text_config, attn_implementation=config._attn_implementation) if language_model._tied_weights_keys is not None: self._tied_weights_keys = [f'language_model.{k}' for k in language_model._tied_weights_keys] self.language_model = language_model self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.language_model.get_output_embeddings() def get_encoder(self): return self.language_model.get_encoder() def get_decoder(self): return self.language_model.get_decoder() def _tie_weights(self): if not self.config.use_decoder_only_language_model: self.language_model.encoder.embed_tokens = self.language_model.shared self.language_model.decoder.embed_tokens = self.language_model.shared def _preprocess_accelerate(self): hf_device_map = self.hf_device_map if len(hf_device_map) > 1 and 'language_model' not in hf_device_map and (torch.cuda.device_count() > 1): logger.warning('The `language_model` is not in the `hf_device_map` dictionary and you are running your script in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`. Please pass a `device_map` that contains `language_model` to remove this warning. 
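# Hedged usage sketch for Blip2ForConditionalGeneration: load a checkpoint with an
# `accelerate` device map that covers the whole model (the `language_model` placement is
# what the check above looks at), then caption an image. The checkpoint name and image URL
# are illustrative; as the docstring above notes, Flan-T5-based checkpoints should be
# loaded in bfloat16 rather than float16.
import requests
import torch
from PIL import Image
from transformers import Blip2ForConditionalGeneration, Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")   # assumed checkpoint
model = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-2.7b", device_map="auto", torch_dtype=torch.float16
)

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt").to(model.device, torch.float16)
generated_ids = model.generate(**inputs, max_new_tokens=20)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])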
Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for more details on creating a `device_map` for large models.') if hasattr(self.language_model, '_hf_hook'): self.language_model._hf_hook.io_same_device = True @add_start_docstrings_to_model_forward(BLIP_2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Blip2ForConditionalGenerationModelOutput, config_class=Blip2VisionConfig) def forward(self, pixel_values: torch.FloatTensor, input_ids: torch.FloatTensor, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) query_output = query_outputs[0] language_model_inputs = self.language_projection(query_output) language_model_attention_mask = torch.ones(language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device) inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) if getattr(self.config, 'image_token_index', None) is not None: special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1).expand_as(inputs_embeds) language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs) else: logger.warning_once('Expanding inputs for image tokens in BLIP-2 should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your BLIP-2 model. 
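# Toy sketch of the `image_token_index` branch above: the processor expands the prompt with
# `num_query_tokens` placeholder image tokens, and `masked_scatter` overwrites exactly those
# embedding positions with the projected Q-Former outputs. The token id and sizes below are
# made up for illustration.
import torch

batch, num_query_tokens, hidden = 1, 3, 4
image_token_index = 50265                                     # hypothetical placeholder-token id
input_ids = torch.tensor([[image_token_index] * num_query_tokens + [10, 11]])
inputs_embeds = torch.zeros(batch, input_ids.shape[1], hidden)
language_model_inputs = torch.randn(batch, num_query_tokens, hidden)

special_image_mask = (input_ids == image_token_index).unsqueeze(-1).expand_as(inputs_embeds)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)
assert torch.allclose(inputs_embeds[:, :num_query_tokens], language_model_inputs)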
Using processors without these attributes in the config is deprecated and will throw an error in v4.47.') inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(language_model_attention_mask.device)], dim=1) if self.config.use_decoder_only_language_model: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: labels = labels.to(logits.device) logits = logits[:, -labels.size(1):, :] shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous().to(logits.device) loss_fct = CrossEntropyLoss(reduction='mean') loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) else: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels) loss = outputs.loss if return_dict else outputs[0] logits = outputs.logits if return_dict else outputs[1] if not return_dict: output = (logits, vision_outputs, query_outputs, outputs) return (loss,) + output if loss is not None else output return Blip2ForConditionalGenerationModelOutput(loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs) @torch.no_grad() def generate(self, pixel_values: torch.FloatTensor, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor: if hasattr(self, 'hf_device_map'): self._preprocess_accelerate() batch_size = pixel_values.shape[0] image_embeds = self.vision_model(pixel_values, return_dict=True, interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True) query_output = query_outputs.last_hidden_state language_model_inputs = self.language_projection(query_output) language_attention_mask = torch.ones(language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device) if input_ids is None: input_ids = torch.LongTensor([[self.config.text_config.bos_token_id]]).repeat(batch_size, 1).to(image_embeds.device) inputs_embeds = self.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) if getattr(self.config, 'image_token_index', None) is not None: special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1).expand_as(inputs_embeds) inputs_embeds[special_image_mask] = language_model_inputs.flatten() else: logger.warning_once('Expanding inputs for image tokens in BLIP-2 should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your BLIP-2 model. 
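# Standalone sketch of the causal language-modeling loss computed in the forward pass above:
# logits are first cut down to the label length (dropping positions that belong to the
# projected image prefix), then shifted by one position so each token predicts the next one.
# Toy tensors and an arbitrary vocabulary size.
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 7
logits = torch.randn(1, 5, vocab_size)        # (batch, image prefix + text length, vocab)
labels = torch.tensor([[2, 4, 1]])            # labels exist only for the text part

logits = logits[:, -labels.size(1):, :]
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss = CrossEntropyLoss(reduction="mean")(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss)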
Using processors without these attributes in the config is deprecated and will throw an error in v4.47.') inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1) if not self.language_model.config.is_encoder_decoder: generate_kwargs['max_length'] = generate_kwargs.get('max_length', 20) + language_model_inputs.shape[1] - 1 generate_kwargs['min_length'] = generate_kwargs.get('min_length', 0) + language_model_inputs.shape[1] outputs = self.language_model.generate(inputs_embeds=inputs_embeds, attention_mask=attention_mask, **generate_kwargs) if not self.language_model.config.is_encoder_decoder: bos_tokens = torch.LongTensor([[self.config.text_config.bos_token_id]]).repeat(batch_size, 1).to(image_embeds.device) if not isinstance(outputs, torch.Tensor): outputs.sequences = torch.cat([bos_tokens, outputs.sequences], dim=-1) else: outputs = torch.cat([bos_tokens, outputs], dim=-1) return outputs @add_start_docstrings('\n BLIP-2 Model with a vision and text projector, and a classification head on top. The model is used in the context\n of image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to\n the image.\n ', BLIP_2_START_DOCSTRING) class Blip2ForImageTextRetrieval(Blip2PreTrainedModel): main_input_name = 'pixel_values' _keep_in_fp32_modules = [] def __init__(self, config: Blip2Config): super().__init__(config) self.vision_model = Blip2VisionModel(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.embeddings = Blip2TextEmbeddings(config.qformer_config) self.qformer = Blip2QFormerModel(config.qformer_config) self.vision_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size) self.text_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size) self.itm_head = nn.Linear(config.qformer_config.hidden_size, 2) self.post_init() @add_start_docstrings_to_model_forward(BLIP2_IMAGE_TEXT_RETRIEVAL_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Blip2ImageTextMatchingModelOutput, config_class=Blip2Config) def forward(self, pixel_values: torch.FloatTensor, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor]=None, use_image_text_matching_head: Optional[bool]=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Blip2ImageTextMatchingModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) if use_image_text_matching_head: query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long).to(query_tokens.device) attention_mask = torch.cat([query_attention_mask, attention_mask], dim=1) query_embeds = 
self.embeddings(input_ids=input_ids, query_embeds=query_tokens) text_outputs = self.qformer(query_embeds=query_embeds, query_length=query_tokens.shape[1], attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=return_dict) text_embeds = text_outputs[0] if not return_dict else text_outputs.last_hidden_state output = self.itm_head(text_embeds[:, :query_tokens.size(1), :]) logits_per_image = output.mean(dim=1) logits_per_text = logits_per_image.t() else: query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=return_dict) image_embeds = query_outputs[0] if not return_dict else query_outputs.last_hidden_state query_embeds = self.embeddings(input_ids=input_ids) text_outputs = self.qformer(query_embeds=query_embeds, query_length=0, attention_mask=attention_mask, return_dict=return_dict) question_embeds = text_outputs[0] if not return_dict else text_outputs.last_hidden_state image_embeds = nn.functional.normalize(self.vision_projection(image_embeds), dim=-1) text_embeds = nn.functional.normalize(self.text_projection(question_embeds[:, 0, :]), dim=-1) logits_per_image = torch.matmul(image_embeds, text_embeds.t()) (logits_per_image, _) = logits_per_image.max(dim=1) logits_per_text = logits_per_image.t() if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return output return Blip2ImageTextMatchingModelOutput(logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) # File: transformers-main/src/transformers/models/blip_2/processing_blip_2.py """""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import AddedToken, BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType, logging logger = logging.get_logger(__name__) class Blip2Processor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] valid_kwargs = ['num_query_tokens'] image_processor_class = 'BlipImageProcessor' tokenizer_class = 'AutoTokenizer' def __init__(self, image_processor, tokenizer, num_query_tokens=None, **kwargs): tokenizer.return_token_type_ids = False self.current_processor = image_processor self.image_token = AddedToken('', normalized=False, special=True) tokenizer.add_tokens([self.image_token], special_tokens=True) self.num_query_tokens = num_query_tokens super().__init__(image_processor, tokenizer) def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_token_type_ids: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding: if images is None and text is None: raise ValueError('You have 
to specify either images or text.') if images is None: self.current_processor = self.tokenizer text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs) return text_encoding encoding_image_processor = self.image_processor(images, return_tensors=return_tensors) if text is not None: if isinstance(text, str): text = [text] elif not isinstance(text, list) and (not isinstance(text[0], str)): raise ValueError('Invalid input text. Please provide a string, or a list of strings') text_encoding = {} _text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=None, **kwargs) if self.num_query_tokens is not None: image_tokens = self.image_token.content * self.num_query_tokens image_token_encoding = self.tokenizer([image_tokens], add_special_tokens=False, return_tensors=None) for k in _text_encoding: text_encoding[k] = [img_encoding + txt_encoding for (img_encoding, txt_encoding) in zip(image_token_encoding[k], _text_encoding[k])] else: text_encoding = _text_encoding logger.warning_once('Expanding inputs for image tokens in BLIP-2 should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your BLIP-2 model. 
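# Plain-Python sketch of the merging step above: when `num_query_tokens` is set, the encoding
# of `num_query_tokens` copies of the special "<image>" placeholder token is concatenated in
# front of each text encoding, key by key. The ids below are invented for illustration.
num_query_tokens = 3
image_token_id = 900                        # hypothetical id of the "<image>" placeholder
text_encoding = {"input_ids": [[10, 11, 12]], "attention_mask": [[1, 1, 1]]}
image_token_encoding = {"input_ids": [[image_token_id] * num_query_tokens], "attention_mask": [[1] * num_query_tokens]}

merged = {
    key: [img + txt for img, txt in zip(image_token_encoding[key], text_encoding[key])]
    for key in text_encoding
}
print(merged["input_ids"])                  # [[900, 900, 900, 10, 11, 12]]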
Using processors without these attributes in the config is deprecated and will throw an error in v4.47.') text_encoding = BatchEncoding(text_encoding, tensor_type=return_tensors) else: text_encoding = None if text_encoding is not None: encoding_image_processor.update(text_encoding) return encoding_image_processor def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) # File: transformers-main/src/transformers/models/bloom/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_bloom': ['BloomConfig', 'BloomOnnxConfig']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_bloom'] = ['BloomForCausalLM', 'BloomModel', 'BloomPreTrainedModel', 'BloomForSequenceClassification', 'BloomForTokenClassification', 'BloomForQuestionAnswering'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_bloom'] = ['FlaxBloomForCausalLM', 'FlaxBloomModel', 'FlaxBloomPreTrainedModel'] if TYPE_CHECKING: from .configuration_bloom import BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bloom import FlaxBloomForCausalLM, FlaxBloomModel, FlaxBloomPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/bloom/configuration_bloom.py """""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... 
import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging logger = logging.get_logger(__name__) class BloomConfig(PretrainedConfig): model_type = 'bloom' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_hidden_layers': 'n_layer', 'num_attention_heads': 'n_head'} def __init__(self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-05, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs): self.vocab_size = vocab_size n_embed = kwargs.pop('n_embed', None) self.hidden_size = hidden_size if n_embed is None else n_embed self.n_layer = n_layer self.n_head = n_head self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache self.pretraining_tp = pretraining_tp self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.slow_but_exact = slow_but_exact super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) class BloomOnnxConfig(OnnxConfigWithPast): torch_onnx_minimum_version = version.parse('1.12') def __init__(self, config: PretrainedConfig, task: str='default', patching_specs: List[PatchingSpec]=None, use_past: bool=False): super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past) if not getattr(self._config, 'pad_token_id', None): self._config.pad_token_id = 0 @property def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}}) if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs', inverted_values_shape=True) common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'} else: common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'} return common_inputs @property def num_layers(self) -> int: return self._config.n_layer @property def num_attention_heads(self) -> int: return self._config.n_head @property def atol_for_validation(self) -> float: return 0.001 def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizer', batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional['TensorType']=None) -> Mapping[str, Any]: common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']}) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, seqlen) = common_inputs['input_ids'].shape past_key_values_length = seqlen + 2 head_dim = self._config.hidden_size // self.num_attention_heads past_key_shape = (batch * self.num_attention_heads, head_dim, past_key_values_length) past_value_shape = (batch * self.num_attention_heads, past_key_values_length, head_dim) ordered_inputs['past_key_values'] = [(torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)] ordered_inputs['attention_mask'] = common_inputs['attention_mask'] if self.use_past: 
mask_dtype = ordered_inputs['attention_mask'].dtype ordered_inputs['attention_mask'] = torch.cat([ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1) return ordered_inputs @property def default_onnx_opset(self) -> int: return 13 # File: transformers-main/src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py """""" import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() WEIGHTS_TO_AVERAGE_ENDSWITH = ['word_embeddings_layernorm.weight', 'word_embeddings_layernorm.bias', 'input_layernorm.weight', 'input_layernorm.bias', 'post_attention_layernorm.weight', 'post_attention_layernorm.bias', 'self_attention.dense.bias', 'mlp.dense_4h_to_h.bias', 'ln_f.weight', 'ln_f.bias'] WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = ['mlp.dense_4h_to_h.weight', 'self_attention.dense.weight'] def layer_name_mapping(key, file): layer_rename_map = {'word_embeddings.weight': 'word_embeddings.weight', 'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight', 'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias', 'weight': 'ln_f.weight', 'bias': 'ln_f.bias'} if key in layer_rename_map: return layer_rename_map[key] layer_number = int(re.match('.*layer_(\\d*).*', file)[1]) layer_number -= 3 return f'h.{layer_number}.' + key def get_dtype_size(dtype): if dtype == torch.bool: return 1 / 8 bit_search = re.search('[^\\d](\\d+)$', str(dtype)) if bit_search is None: raise ValueError(f'`dtype` is not a valid dtype: {dtype}.') bit_size = int(bit_search.groups()[0]) return bit_size // 8 def convert_bloom_checkpoint_to_pytorch(bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp): if bloom_config_file == '': config = BloomConfig() else: config = BloomConfig.from_json_file(bloom_config_file) if shard_model: file_names = os.listdir(bloom_checkpoint_path) file_names = sorted(filter(lambda s: s.startswith('layer') and 'model_00' in s, file_names)) index_dict = {'weight_map': {}, 'metadata': {}} total_size = 0 missing_keys = None config = BloomConfig() for (j, file) in enumerate(file_names): print('Processing file: {}'.format(file)) tensors = None for i in range(pretraining_tp): f_name = file.replace('model_00', f'model_0{i}') temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='cpu') keys = list(temp.keys()) for key in keys: temp[layer_name_mapping(key, file)] = temp.pop(key) if tensors is None: tensors = temp else: for key in tensors.keys(): if any((key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH)): tensors[key] += temp[key] else: cat_dim = 1 if any((text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN)) else 0 tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim) for key in tensors.keys(): if any((key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH)): tensors[key] = tensors[key] / pretraining_tp torch.save(tensors, os.path.join(pytorch_dump_folder_path, 'pytorch_model_{}-of-{}.bin'.format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)))) for key in tensors.keys(): value = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype) if key not in index_dict['weight_map']: index_dict['weight_map'][key] = 'pytorch_model_{}-of-{}.bin'.format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)) config = BloomConfig() pytorch_config_dump_path = 
pytorch_dump_folder_path + '/' + CONFIG_NAME index_dict['metadata']['total_size'] = total_size with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f: f.write(config.to_json_string()) with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + '.index.json'), 'w', encoding='utf-8') as f: json_config = json.dumps(index_dict, indent=2, sort_keys=True) + '\n' f.write(json_config) else: model = BloomModel(config) file_names = os.listdir(bloom_checkpoint_path) file_names = sorted(filter(lambda s: s.startswith('layer') and 'model_00' in s, file_names)) missing_keys = None for (i, file) in enumerate(file_names): tensors = None for i in range(pretraining_tp): f_name = file.replace('model_00', f'model_0{i}') temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='cpu') keys = list(temp.keys()) for key in keys: temp[layer_name_mapping(key, file)] = temp.pop(key) if tensors is None: tensors = temp else: for key in tensors.keys(): if any((key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH)): tensors[key] += temp[key] else: cat_dim = 1 if any((text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN)) else 0 tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim) for key in tensors.keys(): if any((key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH)): tensors[key] = tensors[key] / pretraining_tp other_keys = model.load_state_dict(tensors, strict=False) assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected' if missing_keys is None: missing_keys = set(other_keys.missing_keys) else: missing_keys = missing_keys.intersection(set(other_keys.missing_keys)) assert not missing_keys, f'The keys {missing_keys} are missing' os.makedirs(pytorch_dump_folder_path, exist_ok=True) pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}') if config.torch_dtype is not None: model = model.to(config.torch_dtype) torch.save(model.state_dict(), pytorch_weights_dump_path) print(f'Save configuration file to {pytorch_config_dump_path}') with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f: f.write(config.to_json_string()) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--bloom_checkpoint_path', default=None, type=str, required=True, help='Path to the Megatron-LM checkpoint path.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') parser.add_argument('--bloom_config_file', default='', type=str, help='An optional config json file corresponding to the pre-trained model. 
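# Quick illustration of the block-weight path of `layer_name_mapping` above: the Megatron
# layer-file index is shifted down by 3, so a weight stored in layer file N lands in
# transformer block h.(N - 3). The file name below only mimics the expected
# `layer_XX-model_00...` pattern and is not taken from a real checkpoint.
import re

def block_layer_name(key: str, file: str) -> str:
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1]) - 3
    return f"h.{layer_number}." + key

print(block_layer_name("self_attention.dense.weight", "layer_05-model_00-model_states.pt"))
# h.2.self_attention.dense.weight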
\nThis specifies the model architecture.') parser.add_argument('--shard_model', action='store_true', help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint') parser.add_argument('--pretraining_tp', default=4, type=int, help='Pretraining TP rank that has been used when training the model in Megatron-LM \n') args = parser.parse_args() convert_bloom_checkpoint_to_pytorch(args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp) # File: transformers-main/src/transformers/models/bloom/modeling_bloom.py """""" import math import warnings from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss from torch.nn import functional as F from ...cache_utils import Cache, DynamicCache, StaticCache from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_bloom import BloomConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'bigscience/bloom-560m' _CONFIG_FOR_DOC = 'BloomConfig' def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: (batch_size, seq_length) = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor(2 ** (-2 ** (-(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32) powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor(2 ** (-2 ** (-(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] 
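# Standalone illustration of the mask built by
# `_prepare_4d_causal_attention_mask_with_cache_position` above: positions after a token's
# cache position receive a large negative value, everything at or before it stays visible.
# Toy sizes: two cached tokens plus three new ones.
import torch

sequence_length, target_length = 3, 5
min_dtype = torch.finfo(torch.float32).min
cache_position = torch.arange(2, 2 + sequence_length)        # absolute positions of the new tokens

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask = causal_mask * (torch.arange(target_length) > cache_position.reshape(-1, 1))
print((causal_mask == 0).int())
# row i attends to every position <= cache_position[i]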
alibi = slopes[..., None] * arange_tensor return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype) def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor: out = F.dropout(x, p=prob, training=training) out = residual + out return out def bloom_gelu_forward(x: torch.Tensor) -> torch.Tensor: return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) def bloom_gelu_back(g: torch.Tensor, x: torch.Tensor) -> torch.Tensor: x = x[0] tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)) ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out) return ff * g class GeLUFunction(torch.autograd.Function): @staticmethod def forward(ctx, input: torch.Tensor) -> torch.Tensor: ctx.save_for_backward(input) return bloom_gelu_forward(input) @staticmethod def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: input = ctx.saved_tensors tmp = bloom_gelu_back(grad_output, input) return tmp class BloomGelu(nn.Module): def __init__(self): super().__init__() def forward(self, x: torch.Tensor) -> torch.Tensor: if self.training: return GeLUFunction.apply(x) else: return bloom_gelu_forward(x) class BloomAttention(nn.Module): def __init__(self, config: BloomConfig, layer_idx: Optional[int]=None): super().__init__() self.pretraining_tp = config.pretraining_tp self.slow_but_exact = config.slow_but_exact self.hidden_size = config.hidden_size self.num_heads = config.n_head self.head_dim = self.hidden_size // self.num_heads self.split_size = self.hidden_size self.hidden_dropout = config.hidden_dropout if self.head_dim * self.num_heads != self.hidden_size: raise ValueError(f'`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim) self.beta = 1.0 self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. 
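# Simplified, standalone version of `build_alibi_tensor` above for the case where the number
# of heads is a power of two: head h (h = 1..num_heads) gets the geometric slope
# 2 ** (-8 * h / num_heads), and each token's bias is that slope times its position, computed
# from the cumulative sum of the attention mask so padding contributes nothing. Illustration
# only; the library function additionally handles non-power-of-two head counts.
import torch

def simple_alibi(attention_mask: torch.Tensor, num_heads: int) -> torch.Tensor:
    batch_size, seq_length = attention_mask.shape
    base = 2.0 ** (-8.0 / num_heads)
    slopes = base ** torch.arange(1, 1 + num_heads, dtype=torch.float32)
    positions = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
    alibi = slopes[..., None] * positions                     # (batch, num_heads, seq_length)
    return alibi.reshape(batch_size * num_heads, 1, seq_length)

print(simple_alibi(torch.ones(1, 5), num_heads=4)[:, 0, :])   # each row: slope * [0, 1, 2, 3, 4]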
Please make sure to provide a `layer_idx` when creating this class.') self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True) self.dense = nn.Linear(self.hidden_size, self.hidden_size) self.attention_dropout = nn.Dropout(config.attention_dropout) def _reshape(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: (batch_size, seq_length, three_times_hidden_size) = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim) query_layer = fused_qkv[..., 0, :].transpose(1, 2) key_layer = fused_qkv[..., 1, :].transpose(1, 2) value_layer = fused_qkv[..., 2, :].transpose(1, 2) return (query_layer, key_layer, value_layer) def _merge_heads(self, x: torch.Tensor) -> torch.Tensor: (batch_size_and_num_heads, seq_length, _) = x.shape batch_size = batch_size_and_num_heads // self.num_heads x = x.view(batch_size, self.num_heads, seq_length, self.head_dim) x = x.permute(0, 2, 1, 3) return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim) def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None): (batch_size, q_length, _) = hidden_states.shape fused_qkv = self.query_key_value(hidden_states) (query_layer, key_layer, value_layer) = self._reshape(fused_qkv) if layer_past is not None: cache_kwargs = {'cache_position': cache_position} (key_layer, value_layer) = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs) query_layer = query_layer.reshape(batch_size * self.num_heads, -1, self.head_dim) key_layer = key_layer.reshape(batch_size * self.num_heads, -1, self.head_dim).transpose(-1, -2) value_layer = value_layer.reshape(batch_size * self.num_heads, -1, self.head_dim) attention_scores = alibi.baddbmm(batch1=query_layer, batch2=key_layer, beta=self.beta, alpha=self.inv_norm_factor) attn_weights = attention_scores.view(batch_size, self.num_heads, q_length, -1) if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key_layer.shape[-1]] attn_weights = attn_weights + causal_mask attention_probs = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_layer.dtype) attention_probs = self.attention_dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask attention_probs_reshaped = attention_probs.view(batch_size * self.num_heads, q_length, -1) context_layer = torch.bmm(attention_probs_reshaped, value_layer) context_layer = self._merge_heads(context_layer) if self.pretraining_tp > 1 and self.slow_but_exact: slices = self.hidden_size / self.pretraining_tp output_tensor = torch.zeros_like(context_layer) for i in range(self.pretraining_tp): output_tensor = output_tensor + F.linear(context_layer[:, :, int(i * slices):int((i + 1) * slices)], self.dense.weight[:, int(i * slices):int((i + 1) * slices)]) else: output_tensor = self.dense(context_layer) output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training) outputs = (output_tensor, layer_past) if output_attentions: outputs += (attention_probs,) return outputs class BloomMLP(nn.Module): def __init__(self, config: BloomConfig): super().__init__() hidden_size = config.hidden_size self.pretraining_tp = config.pretraining_tp self.slow_but_exact = config.slow_but_exact self.dense_h_to_4h = nn.Linear(hidden_size, 4 * 
hidden_size) self.gelu_impl = BloomGelu() self.dense_4h_to_h = nn.Linear(4 * hidden_size, hidden_size) self.hidden_dropout = config.hidden_dropout def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor: hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states)) if self.pretraining_tp > 1 and self.slow_but_exact: intermediate_output = torch.zeros_like(residual) slices = self.dense_4h_to_h.weight.shape[-1] / self.pretraining_tp for i in range(self.pretraining_tp): intermediate_output = intermediate_output + F.linear(hidden_states[:, :, int(i * slices):int((i + 1) * slices)], self.dense_4h_to_h.weight[:, int(i * slices):int((i + 1) * slices)]) else: intermediate_output = self.dense_4h_to_h(hidden_states) output = dropout_add(intermediate_output, residual, self.hidden_dropout, self.training) return output class BloomBlock(nn.Module): def __init__(self, config: BloomConfig, layer_idx: Optional[int]=None): super().__init__() hidden_size = config.hidden_size self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.num_heads = config.n_head self.self_attention = BloomAttention(config, layer_idx) self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = BloomMLP(config) self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm self.hidden_dropout = config.hidden_dropout def forward(self, hidden_states: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None): layernorm_output = self.input_layernorm(hidden_states) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states attn_outputs = self.self_attention(layernorm_output, residual, layer_past=layer_past, attention_mask=attention_mask, alibi=alibi, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) attention_output = attn_outputs[0] outputs = attn_outputs[1:] layernorm_output = self.post_attention_layernorm(attention_output) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = attention_output output = self.mlp(layernorm_output, residual) if use_cache: outputs = (output,) + outputs else: outputs = (output,) + outputs[1:] return outputs class BloomPreTrainedModel(PreTrainedModel): config_class = BloomConfig base_model_prefix = 'transformer' supports_gradient_checkpointing = True _no_split_modules = ['BloomBlock'] _skip_keys_device_placement = 'past_key_values' _supports_cache_class = True _supports_static_cache = True _supports_quantized_cache = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module: nn.Module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) BLOOM_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. 
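# Standalone check of the `slow_but_exact` path used in BloomMLP and BloomAttention above:
# applying the output projection slice-by-slice over `pretraining_tp` column blocks and
# summing the partial results equals a single F.linear up to floating-point error, while
# following the summation order of the tensor-parallel pretraining more closely. Toy sizes
# and random weights.
import torch
import torch.nn.functional as F

pretraining_tp, hidden_size = 4, 8
hidden_states = torch.randn(2, 3, 4 * hidden_size)
weight = torch.randn(hidden_size, 4 * hidden_size)            # stand-in for dense_4h_to_h.weight

slices = weight.shape[-1] // pretraining_tp
intermediate_output = torch.zeros(2, 3, hidden_size)
for i in range(pretraining_tp):
    intermediate_output = intermediate_output + F.linear(
        hidden_states[:, :, i * slices:(i + 1) * slices], weight[:, i * slices:(i + 1) * slices]
    )

print(torch.allclose(intermediate_output, F.linear(hidden_states, weight), atol=1e-4))  # True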
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`BloomConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BLOOM_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[2]`\n (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.\n\n If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance, see our\n [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see\n `past_key_values`).\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. It is used to update the cache in the correct position and to infer\n the complete sequence length.\n" @add_start_docstrings('The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.', BLOOM_START_DOCSTRING) class BloomModel(BloomPreTrainedModel): def __init__(self, config: BloomConfig): super().__init__(config) self.embed_dim = config.hidden_size self.num_heads = config.n_head self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim) self.word_embeddings_layernorm = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.h = nn.ModuleList([BloomBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.gradient_checkpointing = False self.post_init() def build_alibi_tensor(self, attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: return build_alibi_tensor(attention_mask, num_heads, dtype) def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, new_embeddings: torch.Tensor): self.word_embeddings = new_embeddings @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **deprecated_arguments) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: if deprecated_arguments.pop('position_ids', False) is not False: warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. 
You can safely ignore passing `position_ids`.', FutureWarning) if len(deprecated_arguments) > 0: raise ValueError(f'Got unexpected arguments: {deprecated_arguments}') output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) use_legacy_cache = False if use_cache and (not isinstance(past_key_values, Cache)) and (not self.training): use_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) logger.warning_once('Using `past_key_values` as a tuple is deprecated and will be removed in v4.45. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/internal/generation_utils#transformers.Cache)') (batch_size, seq_length, _) = inputs_embeds.shape past_length = past_key_values.get_seq_length() if past_key_values is not None else 0 seq_length_with_past = seq_length + past_length if cache_position is None: cache_position = torch.arange(past_length, past_length + seq_length, device=inputs_embeds.device) head_mask = self.get_head_mask(head_mask, self.config.n_layer) hidden_states = self.word_embeddings_layernorm(inputs_embeds) next_decoder_cache = None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device) else: attention_mask = attention_mask.to(hidden_states.device) alibi = self.build_alibi_tensor(attention_mask, self.num_heads, dtype=hidden_states.dtype) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) for (i, block) in enumerate(self.h): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, alibi, causal_mask, past_key_values, head_mask[i], use_cache, output_attentions, cache_position) else: outputs = block(hidden_states, layer_past=past_key_values, attention_mask=causal_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, cache_position=cache_position) hidden_states = outputs[0] if use_cache: next_decoder_cache = outputs[1] if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions] if v is not None)) return 
BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attentions) def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions): causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @add_start_docstrings('\n The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', BLOOM_START_DOCSTRING) class BloomForCausalLM(BloomPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config: BloomConfig): super().__init__(config) self.transformer = BloomModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings: torch.Tensor): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, use_cache=True, **kwargs): if past_key_values is not None: if inputs_embeds is not None: input_ids = input_ids[:, -cache_position.shape[0]:] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None} else: model_inputs = {'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None} if isinstance(past_key_values, StaticCache) and attention_mask is not None: target_length = past_key_values.get_max_length() (batch_size, seq_length) = attention_mask.shape diff = target_length - seq_length new_attn_mask = torch.zeros(batch_size, diff, device=attention_mask.device, dtype=attention_mask.dtype) attention_mask = torch.cat([attention_mask, new_attn_mask], dim=-1) model_inputs.update({'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask}) return model_inputs 
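# --- Illustrative usage sketch (not part of the original modeling_bloom.py) ---
# A minimal, hedged example of how `prepare_inputs_for_generation` above is exercised:
# `generate()` calls it on every decoding step to slice `input_ids` against `cache_position`
# and to forward the key/value cache. The checkpoint name below ("bigscience/bloom-560m")
# is only an assumption for the demo; any Bloom checkpoint would do.
#
#     from transformers import AutoTokenizer, BloomForCausalLM
#
#     tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
#     model = BloomForCausalLM.from_pretrained("bigscience/bloom-560m")
#     inputs = tokenizer("Hello, my name is", return_tensors="pt")
#     # greedy decoding; each step re-enters prepare_inputs_for_generation with an updated cache_position
#     output_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False)
#     print(tokenizer.decode(output_ids[0], skip_special_tokens=True))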
@add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **deprecated_arguments) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: if deprecated_arguments.pop('position_ids', False) is not False: warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore passing `position_ids`.', FutureWarning) if len(deprecated_arguments) > 0: raise ValueError(f'Got unexpected arguments: {deprecated_arguments}') return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() (batch_size, seq_length, vocab_size) = shift_logits.shape loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def _reorder_cache(self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: device_to_beam_idx = {past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past} reordered_past = tuple(((layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]), layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device])) for layer_past in past)) return reordered_past @add_start_docstrings('\n The Bloom Model transformer with a sequence classification head on top (linear layer).\n\n [`BloomForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', BLOOM_START_DOCSTRING) class BloomForSequenceClassification(BloomPreTrainedModel): def __init__(self, config: BloomConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = BloomModel(config) self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False) self.post_init() @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: if deprecated_arguments.pop('position_ids', False) is not False: warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore passing `position_ids`.', FutureWarning) if len(deprecated_arguments) > 0: raise ValueError(f'Got unexpected arguments: {deprecated_arguments}') return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n Bloom Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', BLOOM_START_DOCSTRING) class BloomForTokenClassification(BloomPreTrainedModel): def __init__(self, config: BloomConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = BloomModel(config) if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None: classifier_dropout = config.classifier_dropout elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: if deprecated_arguments.pop('position_ids', False) is not False: warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. 
You can safely ignore passing `position_ids`.', FutureWarning) if len(deprecated_arguments) > 0: raise ValueError(f'Got unexpected arguments: {deprecated_arguments}') return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) (batch_size, seq_length) = labels.shape loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)) if not return_dict: output = (logits,) + transformer_outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n The BLOOM Model transformer with a span classification head on top for extractive question-answering tasks like\n SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', BLOOM_START_DOCSTRING) class BloomForQuestionAnswering(BloomPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = BloomModel(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.post_init() @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss 
is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/bloom/modeling_flax_bloom.py """""" import math from functools import partial from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, dot_product_attention_weights, make_causal_mask from flax.linen.activation import tanh from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutput from ...modeling_flax_utils import FlaxPreTrainedModel, append_call_sample_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_bloom import BloomConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'bigscience/bloom' _CONFIG_FOR_DOC = 'BloomConfig' BLOOM_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a Flax Linen\n [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a\n regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`BloomConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' BLOOM_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`BloomTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):\n Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast\n auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' def build_alibi_tensor(attention_mask: jnp.ndarray, num_heads: int, dtype: Optional[jnp.dtype]=jnp.float32): (batch_size, seq_length) = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = jnp.array(2 ** (-2 ** (-(math.log2(closest_power_of_2) - 3))), dtype=jnp.float32) powers = jnp.arange(1, 1 + closest_power_of_2, dtype=jnp.float32) slopes = jax.lax.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = jnp.array(2 ** (-2 ** (-(math.log2(2 * closest_power_of_2) - 3))), dtype=jnp.float32) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = jnp.arange(1, 1 + 2 * num_remaining_heads, 2, dtype=jnp.float32) slopes = jnp.concatenate([slopes, jax.lax.pow(extra_base, extra_powers)], axis=0) arange_tensor = ((attention_mask.cumsum(axis=-1) - 1) * attention_mask)[:, None, :] alibi = slopes[..., None] * arange_tensor alibi = jnp.expand_dims(alibi, axis=2) return jnp.asarray(alibi, dtype) class FlaxBloomAttention(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.hidden_size = self.config.hidden_size self.num_heads = self.config.n_head self.head_dim = self.hidden_size // self.num_heads self.attention_softmax_in_fp32 = self.dtype is not jnp.float32 if self.head_dim * self.num_heads != self.hidden_size: raise ValueError(f'`hidden_size` must be divisible by `num_heads` (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') dense = partial(nn.Dense, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.query_key_value = dense(self.hidden_size * 3) self.dense = dense(self.hidden_size) self.resid_dropout = nn.Dropout(rate=self.config.hidden_dropout) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:-1] + (self.num_heads, self.head_dim * 3)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') cached_key = self.variable('cache', 'cached_key', jnp.zeros, key.shape, key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape,
value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, dtype=jnp.int32)) if is_initialized: (*batch_dims, max_length, num_heads, depth_per_head) = cached_key.value.shape cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors pad_mask = jnp.broadcast_to(jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) def __call__(self, hidden_states, residual, alibi, attention_mask=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False): (batch_size, seq_length) = hidden_states.shape[:2] fused_qkv = self.query_key_value(hidden_states) fused_qkv = self._split_heads(fused_qkv) (query, key, value) = jnp.split(fused_qkv, 3, axis=-1) causal_attention_mask = make_causal_mask(attention_mask, dtype='bool') causal_attention_mask_shift = self.variables['cache']['cache_index'] if self.has_variable('cache', 'cached_key') else 0 if self.has_variable('cache', 'cached_key'): max_decoder_length = self.variables['cache']['cached_key'].shape[1] causal_attention_mask = jax.lax.dynamic_slice(causal_attention_mask, (0, 0, causal_attention_mask_shift, 0), (1, 1, seq_length, max_decoder_length)) causal_attention_mask = jnp.broadcast_to(causal_attention_mask, (batch_size,) + causal_attention_mask.shape[1:]) attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_attention_mask.shape) attention_mask = combine_masks(attention_mask, causal_attention_mask) dropout_rng = None if not deterministic and self.config.attention_dropout > 0.0: dropout_rng = self.make_rng('dropout') if self.has_variable('cache', 'cached_key') or init_cache: (key, value, attention_mask) = self._concatenate_to_cache(key, value, query, attention_mask) mask_value = jnp.finfo(self.dtype).min attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, mask_value).astype(self.dtype)) attention_bias = attention_bias + alibi attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype attn_weights = dot_product_attention_weights(query, key, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_dropout, deterministic=deterministic, dtype=attention_dtype) if self.attention_softmax_in_fp32: attn_weights = attn_weights.astype(self.dtype) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value) attn_output = self._merge_heads(attn_output) attn_output = self.dense(attn_output) attn_output = self.resid_dropout(attn_output, deterministic=deterministic) attn_output = attn_output + residual outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class BloomGELU(nn.Module): def setup(self): self.dtype = jnp.float32 def __call__(self, x): return x * 0.5 * (1.0 + tanh(0.79788456 * x * (1 + 0.044715 * x * x))) class FlaxBloomMLP(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): hidden_size = self.config.hidden_size kernel_init = jax.nn.initializers.normal(self.config.initializer_range) self.dense_h_to_4h = 
nn.Dense(4 * hidden_size, dtype=self.dtype, kernel_init=kernel_init) self.dense_4h_to_h = nn.Dense(hidden_size, dtype=self.dtype, kernel_init=kernel_init) self.hidden_dropout = nn.Dropout(self.config.hidden_dropout) self.act = BloomGELU() def __call__(self, hidden_states, residual, deterministic: bool=True): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) intermediate_output = self.dense_4h_to_h(hidden_states) intermediate_output = intermediate_output + residual hidden_states = self.hidden_dropout(intermediate_output, deterministic=deterministic) return hidden_states class FlaxBloomBlock(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.input_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.self_attention = FlaxBloomAttention(self.config, dtype=self.dtype) self.post_attention_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.mlp = FlaxBloomMLP(self.config, dtype=self.dtype) self.apply_residual_connection_post_layernorm = self.config.apply_residual_connection_post_layernorm self.hidden_dropout = self.config.hidden_dropout def __call__(self, hidden_states, alibi, attention_mask=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False): layernorm_output = self.input_layernorm(hidden_states) if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states attn_outputs = self.self_attention(layernorm_output, residual=residual, alibi=alibi, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions) attention_output = attn_outputs[0] outputs = attn_outputs[1:] post_layernorm = self.post_attention_layernorm(attention_output) if self.apply_residual_connection_post_layernorm: residual = post_layernorm else: residual = attention_output output = self.mlp(post_layernorm, residual, deterministic=deterministic) outputs = (output,) + outputs return outputs class FlaxBloomPreTrainedModel(FlaxPreTrainedModel): config_class = BloomConfig base_model_prefix = 'transformer' module_class: nn.Module = None def __init__(self, config: BloomConfig, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') attention_mask = jnp.ones_like(input_ids) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): input_ids = jnp.ones((batch_size, max_length), dtype='i4') attention_mask = jnp.ones_like(input_ids) init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, return_dict=False, init_cache=True) return unfreeze(init_variables['cache']) 
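# --- Illustrative usage sketch (not part of the original modeling_flax_bloom.py) ---
# A hedged example of how the cache created by `init_cache` above is typically consumed:
# `FlaxBloomForCausalLM.prepare_inputs_for_generation` (defined later in this file) calls
# `init_cache(batch_size, max_length)` so that `generate()` can decode autoregressively
# against a pre-allocated key/value cache. The checkpoint name is an assumption for the demo.
#
#     from transformers import AutoTokenizer, FlaxBloomForCausalLM
#
#     tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
#     model = FlaxBloomForCausalLM.from_pretrained("bigscience/bloom-560m")
#     inputs = tokenizer("Hello, my name is", return_tensors="np")
#     # generate() pre-allocates the cache up to max_length via init_cache, then decodes step by step
#     outputs = model.generate(inputs["input_ids"], max_length=20, do_sample=False)
#     print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0])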
@add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING) def __call__(self, input_ids, attention_mask=None, past_key_values: dict=None, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict (batch_size, sequence_length) = input_ids.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False outputs = self.module.apply(inputs, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), not train, False, output_attentions, output_hidden_states, return_dict, rngs=rngs, mutable=mutable) if past_key_values is not None and return_dict: (outputs, past_key_values) = outputs outputs['past_key_values'] = unfreeze(past_key_values['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past_key_values) = outputs outputs = outputs[:1] + (unfreeze(past_key_values['cache']),) + outputs[1:] return outputs class FlaxBloomBlockCollection(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxBloomBlock(self.config, name=str(layer_number), dtype=self.dtype) for layer_number in range(self.config.num_hidden_layers)] def __call__(self, hidden_states, alibi, attention_mask=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for layer_number in range(self.config.num_hidden_layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = self.layers[layer_number](hidden_states, alibi=alibi, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) outputs = (hidden_states, all_hidden_states, all_attentions) return outputs class FlaxBloomModule(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embed_dim = self.config.hidden_size self.word_embeddings = nn.Embed(self.config.vocab_size, self.embed_dim, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype) self.word_embeddings_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.h = FlaxBloomBlockCollection(self.config, dtype=self.dtype) self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) def __call__(self, input_ids=None, attention_mask=None, deterministic=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): inputs_embeds = self.word_embeddings(input_ids) hidden_states = self.word_embeddings_layernorm(inputs_embeds) alibi = build_alibi_tensor(attention_mask, self.config.n_head, dtype=hidden_states.dtype) outputs = self.h(hidden_states, 
alibi=alibi, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_hidden_states=output_hidden_states, output_attentions=output_attentions) hidden_states = outputs[0] hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = outputs[1] + (hidden_states,) outputs = (hidden_states, all_hidden_states) + outputs[2:] else: outputs = (hidden_states,) + outputs[1:] if not return_dict: return tuple((v for v in [outputs[0], outputs[-1]] if v is not None)) return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, hidden_states=outputs[1], attentions=outputs[-1]) @add_start_docstrings('The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.', BLOOM_START_DOCSTRING) class FlaxBloomModel(FlaxBloomPreTrainedModel): module_class = FlaxBloomModule append_call_sample_docstring(FlaxBloomModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC) class FlaxBloomForCausalLMModule(nn.Module): config: BloomConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.transformer = FlaxBloomModule(self.config, dtype=self.dtype) self.lm_head = nn.Dense(self.config.vocab_size, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) def __call__(self, input_ids, attention_mask, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.transformer(input_ids, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_kernel = self.transformer.variables['params']['word_embeddings']['embedding'].T lm_logits = self.lm_head.apply({'params': {'kernel': shared_kernel}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', BLOOM_START_DOCSTRING) class FlaxBloomForCausalLM(FlaxBloomPreTrainedModel): module_class = FlaxBloomForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array]=None): (batch_size, seq_length) = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if attention_mask is not None: extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) return {'past_key_values': past_key_values, 'attention_mask': extended_attention_mask} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values return model_kwargs append_call_sample_docstring(FlaxBloomForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC) # File: transformers-main/src/transformers/models/bloom/tokenization_bloom_fast.py """""" import pickle from typing import Optional, Tuple from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = 
{'tokenizer_file': 'tokenizer.json'} class BloomTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = None def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs): super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs) pre_tok_state = pickle.dumps(self.backend_tokenizer.pre_tokenizer) decoder_state = pickle.dumps(self.backend_tokenizer.decoder) if add_prefix_space: pre_tok_state = pre_tok_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true') decoder_state = decoder_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true') self.backend_tokenizer.pre_tokenizer = pickle.loads(pre_tok_state) self.backend_tokenizer.decoder = pickle.loads(decoder_state) self.add_prefix_space = add_prefix_space def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) if not (self.add_prefix_space or not is_split_into_words): raise Exception(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.') return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) if not (self.add_prefix_space or not is_split_into_words): raise Exception(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.') return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) # File: transformers-main/src/transformers/models/bridgetower/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_bridgetower': ['BridgeTowerConfig', 'BridgeTowerTextConfig', 'BridgeTowerVisionConfig'], 'processing_bridgetower': ['BridgeTowerProcessor']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_bridgetower'] = ['BridgeTowerForContrastiveLearning', 'BridgeTowerForImageAndTextRetrieval', 'BridgeTowerForMaskedLM', 'BridgeTowerModel', 'BridgeTowerPreTrainedModel'] if TYPE_CHECKING: from .configuration_bridgetower import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import 
BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) # File: transformers-main/src/transformers/models/bridgetower/configuration_bridgetower.py """""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class BridgeTowerVisionConfig(PretrainedConfig): model_type = 'bridgetower_vision_model' def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.stop_gradient = stop_gradient self.share_layernorm = share_layernorm self.remove_last_layer = remove_last_layer @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'bridgetower': config_dict = config_dict['text_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class BridgeTowerTextConfig(PretrainedConfig): model_type = 'bridgetower_text_model' def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.initializer_factor = initializer_factor self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'bridgetower': config_dict = config_dict['text_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type 
{config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class BridgeTowerConfig(PretrainedConfig): model_type = 'bridgetower' def __init__(self, share_cross_modal_transformer_layers=True, hidden_act='gelu', hidden_size=768, initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type='add', num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs): _ = kwargs.pop('text_config_dict', None) _ = kwargs.pop('vision_config_dict', None) super().__init__(**kwargs) self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers self.hidden_act = hidden_act self.hidden_size = hidden_size self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.share_link_tower_layers = share_link_tower_layers self.link_tower_type = link_tower_type self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers self.tie_word_embeddings = tie_word_embeddings self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder if text_config is None: text_config = {} logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.') if vision_config is None: vision_config = {} logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.') self.text_config = BridgeTowerTextConfig(**text_config) self.vision_config = BridgeTowerVisionConfig(**vision_config) @classmethod def from_text_vision_configs(cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs): return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) # File: transformers-main/src/transformers/models/bridgetower/image_processing_bridgetower.py """""" from typing import Any, Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import PaddingMode, center_crop, pad, resize, to_channel_dimension_format from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_batched, is_scaled_image, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) def max_across_indices(values: Iterable[Any]) -> List[Any]: return [max(values_i) for values_i in zip(*values)] def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: (input_height, input_width) = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask def get_max_height_width(images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> List[int]: if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if input_data_format == ChannelDimension.FIRST: (_, max_height, max_width) = max_across_indices([img.shape for img in images]) elif input_data_format == ChannelDimension.LAST: (max_height, 
max_width, _) = max_across_indices([img.shape for img in images]) else: raise ValueError(f'Invalid channel dimension format: {input_data_format}') return (max_height, max_width) def get_resize_output_image_size(input_image: np.ndarray, shorter: int=800, longer: int=1333, size_divisor: int=32, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]: (input_height, input_width) = get_image_size(input_image, input_data_format) (min_size, max_size) = (shorter, longer) scale = min_size / min(input_height, input_width) if input_height < input_width: new_height = min_size new_width = scale * input_width else: new_height = scale * input_height new_width = min_size if max(new_height, new_width) > max_size: scale = max_size / max(new_height, new_width) new_height = scale * new_height new_width = scale * new_width (new_height, new_width) = (int(new_height + 0.5), int(new_width + 0.5)) new_height = new_height // size_divisor * size_divisor new_width = new_width // size_divisor * size_divisor return (new_height, new_width) class BridgeTowerImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_center_crop: bool=True, crop_size: Dict[str, int]=None, do_pad: bool=True, **kwargs) -> None: if 'pad_and_return_pixel_mask' in kwargs: do_pad = kwargs.pop('pad_and_return_pixel_mask') super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 288} size = get_size_dict(size, default_to_square=False) self.do_resize = do_resize self.size = size self.size_divisor = size_divisor self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_pad = do_pad self.do_center_crop = do_center_crop self.crop_size = crop_size def resize(self, image: np.ndarray, size: Dict[str, int], size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size, default_to_square=False) if 'shortest_edge' not in size: raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. 
Got {size.keys()}') shorter = size['shortest_edge'] longer = int(1333 / 800 * shorter) output_size = get_resize_output_image_size(image, shorter=shorter, longer=longer, size_divisor=size_divisor, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: output_size = size['shortest_edge'] return center_crop(image, size=(output_size, output_size), data_format=data_format, input_data_format=input_data_format, **kwargs) def _pad_image(self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: (input_height, input_width) = get_image_size(image, channel_dim=input_data_format) (output_height, output_width) = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) return padded_image def pad(self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature: pad_size = get_max_height_width(images, input_data_format=input_data_format) padded_images = [self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) for image in images] data = {'pixel_values': padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) for image in images] data['pixel_mask'] = masks return BatchFeature(data=data, tensor_type=return_tensors) @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, size_divisor: Optional[int]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_pad: Optional[bool]=None, do_center_crop: Optional[bool]=None, crop_size: Dict[str, int]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize size_divisor = size_divisor if size_divisor is not None else self.size_divisor resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is 
not None else self.do_pad do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size if self.crop_size is not None else self.size size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) if not is_batched(images): images = [images] if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=size_divisor, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if do_resize: images = [self.resize(image=image, size=size, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format) for image in images] if do_center_crop: images = [self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] if do_pad: encoded_outputs = self.pad(images, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=data_format) else: encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors) return encoded_outputs # File: transformers-main/src/transformers/models/bridgetower/modeling_bridgetower.py """""" import math from collections import OrderedDict from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN, QuickGELUActivation from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel, apply_chunking_to_forward from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_bridgetower import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'BridgeTowerConfig' _CHECKPOINT_FOR_DOC = 'BridgeTower/bridgetower-base' _TOKENIZER_FOR_DOC = 'RobertaTokenizer' BRIDGETOWER_START_DOCSTRING = '\n This model is a PyTorch `torch.nn.Module `_ subclass. 
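# Preprocessing sketch for the BridgeTowerImageProcessor defined above; a random uint8 array
# stands in for a real photo, and the shapes below are what the default settings should produce
# (shorter side resized towards 288, sides snapped to multiples of size_divisor=32, center crop
# to 288x288, padding with an accompanying pixel mask). Assumes `transformers` and `numpy` are installed.
import numpy as np
from transformers.models.bridgetower.image_processing_bridgetower import BridgeTowerImageProcessor

image_processor = BridgeTowerImageProcessor()  # defaults: shortest_edge=288, size_divisor=32, do_pad=True
fake_image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # height x width x channels
encoding = image_processor(images=fake_image, return_tensors="np")
print(encoding["pixel_values"].shape)  # expected (1, 3, 288, 288) with the defaults
print(encoding["pixel_mask"].shape)    # expected (1, 288, 288)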
Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`BridgeTowerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BRIDGETOWER_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See\n [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input\n IDs?](../glossary#input-ids)\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n [What are token type IDs?](../glossary#token-type-ids)\n\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`BridgeTowerImageProcessor`]. See\n [`BridgeTowerImageProcessor.__call__`] for details.\n\n pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):\n Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):\n Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `pixel_values` into patch embeddings.\n\n image_token_type_idx (`int`, *optional*):\n - The token type ids for images.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @dataclass class BridgeTowerModelOutput(ModelOutput): text_features: torch.FloatTensor = None image_features: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BridgeTowerContrastiveOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None text_embeds: Optional[Tuple[torch.FloatTensor]] = None image_embeds: Optional[Tuple[torch.FloatTensor]] = None cross_embeds: Optional[Tuple[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class BridgeTowerResidualAttention(nn.Module): def __init__(self, config): super().__init__() self.attn = nn.MultiheadAttention(config.hidden_size, config.hidden_size // 64) self.ln_1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = nn.ModuleDict(OrderedDict([('c_fc', nn.Linear(config.hidden_size, config.hidden_size * 4)), ('gelu', QuickGELUActivation()), ('c_proj', nn.Linear(config.hidden_size * 4, config.hidden_size))])) self.ln_2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.attn_mask = None def attention(self, hidden_state: torch.Tensor, attention_mask: torch.Tensor): if attention_mask is not None: attention_mask = attention_mask.to(dtype=torch.bool, device=hidden_state.device) self.attn_mask = self.attn_mask.to(dtype=hidden_state.dtype, device=hidden_state.device) if self.attn_mask is not None else None return self.attn(hidden_state, hidden_state, hidden_state, need_weights=False, attn_mask=self.attn_mask, key_padding_mask=attention_mask)[0] def forward(self, hidden_state: torch.Tensor, attention_mask: torch.Tensor=None): residual_state = hidden_state + self.attention(self.ln_1(hidden_state), attention_mask) hidden_state = self.ln_2(residual_state) for (_, layer) in self.mlp.items(): hidden_state = layer(hidden_state) hidden_state = residual_state + hidden_state return hidden_state class BridgeTowerTransformer(nn.Module): def __init__(self, config): super().__init__() self.hidden_size = config.hidden_size self.num_hidden_layers = config.num_hidden_layers if config.remove_last_layer: self.resblocks = nn.ModuleList([BridgeTowerResidualAttention(config) for _ in range(self.num_hidden_layers - 1)]) else: self.resblocks = nn.ModuleList([BridgeTowerResidualAttention(config) for _ in range(self.num_hidden_layers)]) self.stop_gradient = config.stop_gradient def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor]=None): hidden_states = [] for block in self.resblocks: hidden_state = block(hidden_state, attention_mask) if self.stop_gradient: hidden_states.append(hidden_state.detach()) else: hidden_states.append(hidden_state) return hidden_states class BridgeTowerVisionEmbeddings(nn.Module): def __init__(self, config: BridgeTowerVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False) self.num_patches = (self.image_size // self.patch_size) 
** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class BridgeTowerVisionTransformer(nn.Module): def __init__(self, config): super().__init__() self.embeddings = BridgeTowerVisionEmbeddings(config) self.ln_pre = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.transformer = BridgeTowerTransformer(config) self.ln_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.share_layernorm = config.share_layernorm if not config.share_layernorm: self.ln_separate = nn.ModuleList([nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) for _ in range(config.num_hidden_layers)]) def forward(self, pixel_values: torch.Tensor, attention_mask): hidden_states = self.embeddings(pixel_values) hidden_states = self.ln_pre(hidden_states) hidden_states = hidden_states.permute(1, 0, 2) hidden_states = self.transformer(hidden_states, attention_mask) hidden_states = torch.stack(hidden_states, dim=0) hidden_states = hidden_states.permute(0, 2, 1, 3) if self.share_layernorm: hidden_states = self.ln_post(hidden_states) else: hidden_states_stack = [] for (hidden_states, ln) in zip(hidden_states, self.ln_separate): hidden_states = ln(hidden_states) hidden_states_stack.append(hidden_states) hidden_states = torch.stack(hidden_states_stack, dim=0) return hidden_states def forward_pre(self, pixel_values: torch.Tensor): hidden_states = self.embeddings(pixel_values) hidden_states = self.ln_pre(hidden_states) hidden_states = hidden_states.permute(1, 0, 2) return hidden_states def forward_post(self, hidden_state: torch.Tensor): visual_output_post = hidden_state.permute(1, 0, 2) visual_output_post = self.ln_post(visual_output_post) return visual_output_post class BridgeTowerLinkTower(nn.Module): def __init__(self, config): super().__init__() self.link_tower_type = config.link_tower_type self.hidden_size = config.hidden_size if config.link_tower_type in ['add', 'scaled_add', 'interpolate']: if config.link_tower_type == 'scaled_add': self.scaled_factor = nn.Parameter(torch.tensor(1.0)) elif config.link_tower_type == 'interpolate': self.beta = nn.Parameter(torch.tensor(0.5)) self.LayerNorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps) else: raise NotImplementedError(f'link_tower_type {config.link_tower_type} is not implemented') def forward(self, hidden_states, cross_modal_hidden_states, attention_mask): if self.link_tower_type == 'add': return self.LayerNorm(hidden_states + cross_modal_hidden_states) elif self.link_tower_type == 'scaled_add': return self.LayerNorm(hidden_states * self.scaled_factor + cross_modal_hidden_states) elif self.link_tower_type == 'interpolate': return self.LayerNorm(hidden_states * (1 - self.beta) + cross_modal_hidden_states * self.beta) else: raise NotImplementedError(f'link_tower_type {self.link_tower_type} is not implemented') class BridgeTowerSelfOutput(nn.Module): def __init__(self, config): 
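# Shape sketch for the vision embeddings above: with the default vision config (image_size=288,
# patch_size=16), the convolutional patch embedding produces (288 // 16) ** 2 = 324 patch tokens,
# plus one class token, so the vision transformer runs on sequences of length 325. Illustrative
# only; it assumes `torch` and the surrounding `transformers` package are installed.
import torch
from transformers.models.bridgetower.configuration_bridgetower import BridgeTowerVisionConfig
from transformers.models.bridgetower.modeling_bridgetower import BridgeTowerVisionEmbeddings

vision_config = BridgeTowerVisionConfig()        # image_size=288, patch_size=16, hidden_size=768
embeddings = BridgeTowerVisionEmbeddings(vision_config)
pixel_values = torch.zeros(2, 3, 288, 288)       # (batch, channels, height, width)
print(embeddings(pixel_values).shape)            # torch.Size([2, 325, 768]): 324 patches + class token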
super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BridgeTowerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BridgeTowerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BridgeTowerPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BridgeTowerSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, 
encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs BRIDGE_TOWER_SELF_ATTENTION_CLASSES = {'eager': BridgeTowerSelfAttention} class BridgeTowerAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = 
BRIDGE_TOWER_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = BridgeTowerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class BridgeTowerBertCrossLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BridgeTowerAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention self.crossattention = BridgeTowerAttention(config) self.intermediate = BridgeTowerIntermediate(config) self.output = BridgeTowerOutput(config) def forward(self, hidden_states, encoder_hidden_states, attention_mask=None, head_mask=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False): self_attention_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=None, output_attentions=output_attentions, past_key_value=None) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] cross_attention_outputs = self.crossattention(attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BridgeTowerTextLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BridgeTowerAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross 
attention is added') self.crossattention = BridgeTowerAttention(config, position_embedding_type='absolute') self.intermediate = BridgeTowerIntermediate(config) self.output = BridgeTowerOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BridgeTowerTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BridgeTowerTextLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class BridgeTowerTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + 
token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device) return position_ids.unsqueeze(0).expand(input_shape) def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx class BridgeTowerPreTrainedModel(PreTrainedModel): config_class = BridgeTowerConfig base_model_prefix = 'bridgetower' supports_gradient_checkpointing = False _no_split_modules = ['BridgeTowerSelfAttention', 'BridgeTowerResidualAttention'] _skip_keys_device_placement = 'past_key_values' def _init_weights(self, module): if isinstance(module, BridgeTowerVisionModel): proj_std = module.visual.transformer.hidden_size ** (-0.5) * (2 * module.visual.transformer.num_hidden_layers) ** (-0.5) attn_std = module.visual.transformer.hidden_size ** (-0.5) fc_std = (2 * module.visual.transformer.hidden_size) ** (-0.5) for block in module.visual.transformer.resblocks: nn.init.normal_(block.attn.in_proj_weight, std=attn_std * self.config.initializer_factor) nn.init.normal_(block.attn.out_proj.weight, std=proj_std * self.config.initializer_factor) nn.init.normal_(block.mlp.c_fc.weight, std=fc_std * self.config.initializer_factor) nn.init.normal_(block.mlp.c_proj.weight, std=proj_std * self.config.initializer_factor) nn.init.normal_(module.visual.embeddings.class_embedding, std=attn_std * self.config.initializer_factor) nn.init.normal_(module.visual.embeddings.position_embedding.weight, std=attn_std * self.config.initializer_factor) elif isinstance(module, (nn.Linear, nn.Conv2d, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.05 * self.config.initializer_factor) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class BridgeTowerVisionModel(BridgeTowerPreTrainedModel): config_class = BridgeTowerVisionConfig def __init__(self, config): super().__init__(config) self.visual = BridgeTowerVisionTransformer(config) @property def dtype(self): return self.visual.embeddings.patch_embedding.weight.dtype def forward(self, image, image_mask=None): return self.visual(image.type(self.dtype), image_mask) class BridgeTowerTextModel(BridgeTowerPreTrainedModel): config_class = BridgeTowerTextConfig def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BridgeTowerTextEmbeddings(config) self.encoder = BridgeTowerTextEncoder(config) self.pooler = BridgeTowerPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: 
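# Worked example for `create_position_ids_from_input_ids` above: padding positions keep
# `padding_idx` and real tokens are numbered consecutively starting at `padding_idx + 1`,
# matching the RoBERTa convention used by the text config (pad_token_id=1). The token ids are
# arbitrary illustrative values; only the pad positions matter here.
import torch
from transformers.models.bridgetower.modeling_bridgetower import create_position_ids_from_input_ids

input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # two trailing pad tokens (id 1)
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 5, 1, 1]]): cumulative count of non-pad tokens, offset by padding_idx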
Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return 
BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('The bare BridgeTower Model transformer outputting BridgeTowerModelOutput object without any specific head on top.', BRIDGETOWER_START_DOCSTRING) class BridgeTowerModel(BridgeTowerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config vision_config = config.vision_config text_config = config.text_config if config.share_cross_modal_transformer_layers: self.cross_modal_text_transform = nn.Linear(text_config.hidden_size, config.hidden_size) self.cross_modal_image_transform = nn.Linear(vision_config.hidden_size, config.hidden_size) else: self.cross_modal_text_transform = nn.ModuleList([nn.Linear(text_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)]) self.cross_modal_image_transform = nn.ModuleList([nn.Linear(vision_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)]) self.token_type_embeddings = nn.Embedding(2, config.hidden_size) self.vision_model = BridgeTowerVisionModel(vision_config) self.text_model = BridgeTowerTextModel(text_config) if not vision_config.share_layernorm and config.init_layernorm_from_vision_encoder: for ln in self.vision_model.visual.cross_modal_ln_separate: ln.weight.data = self.vision_model.visual.ln_post.weight.data ln.bias.data = self.vision_model.visual.ln_post.bias.data self.cross_modal_image_layers = nn.ModuleList([BridgeTowerBertCrossLayer(text_config) for _ in range(config.num_hidden_layers)]) self.cross_modal_text_layers = nn.ModuleList([BridgeTowerBertCrossLayer(text_config) for _ in range(config.num_hidden_layers)]) self.cross_modal_image_pooler = BridgeTowerPooler(config) self.cross_modal_text_pooler = BridgeTowerPooler(config) self.cross_modal_text_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.cross_modal_image_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.share_link_tower_layers: self.cross_modal_text_link_tower = BridgeTowerLinkTower(config) self.cross_modal_image_link_tower = BridgeTowerLinkTower(config) else: self.cross_modal_text_link_tower = nn.ModuleList([BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)]) self.cross_modal_image_link_tower = nn.ModuleList([BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)]) self.post_init() def get_input_embeddings(self): return self.text_model.get_input_embeddings() def set_input_embeddings(self, value): self.text_model.set_input_embeddings(value) @add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BridgeTowerModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, image_token_type_idx: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> 
Union[Tuple[torch.Tensor], BridgeTowerModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states all_hidden_states_text = () if output_hidden_states else None all_hidden_states_image = () if output_hidden_states else None all_hidden_states_cross = () if output_hidden_states else None all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if inputs_embeds is not None and input_ids is None: raise NotImplementedError('BridgeTowerModel does not use `inputs_embeds`. Make sure to pass in `input_ids` instead.') return_dict = return_dict if return_dict is not None else self.config.use_return_dict image_token_type_idx = image_token_type_idx if image_token_type_idx else 1 input_shape = input_ids.size() text_embeds = self.text_model.embeddings(input_ids=input_ids) if output_hidden_states: all_hidden_states_text += (text_embeds,) if attention_mask is None: attention_mask = torch.ones(input_shape, dtype=torch.long, device=input_ids.device) extend_text_masks = self.text_model.get_extended_attention_mask(attention_mask, input_shape).to(input_ids.device) split_index = len(self.text_model.encoder.layer) - self.config.num_hidden_layers + 1 for layer in self.text_model.encoder.layer[:split_index]: text_embeds = layer(text_embeds, extend_text_masks)[0] if output_hidden_states: all_hidden_states_text += (text_embeds,) if image_embeds is None: image_embeds = self.vision_model.visual.forward_pre(pixel_values.type(self.vision_model.dtype)) else: image_embeds = image_embeds.permute(1, 0, 2) if output_hidden_states: all_hidden_states_image += (image_embeds,) for block in self.vision_model.visual.transformer.resblocks[:split_index]: image_embeds = block(image_embeds) if output_hidden_states: all_hidden_states_image += (image_embeds,) image_embeds_with_ln = self.vision_model.visual.forward_post(image_embeds.type(self.vision_model.dtype)) cross_modal_text = self.cross_modal_text_transform(text_embeds) text_token_type_embeddings = self.token_type_embeddings(torch.zeros(1, dtype=torch.long, device=input_ids.device)).expand_as(cross_modal_text) cross_modal_text = self.cross_modal_text_layernorm(cross_modal_text + text_token_type_embeddings) image_embeds_with_ln = self.cross_modal_image_transform(image_embeds_with_ln) image_token_type_embeddings = self.token_type_embeddings(torch.full((1,), image_token_type_idx, dtype=torch.long, device=input_ids.device)).expand_as(image_embeds_with_ln) image_embeds_with_ln = image_embeds_with_ln + image_token_type_embeddings cross_modal_image = self.cross_modal_image_layernorm(image_embeds_with_ln) pixel_mask = torch.ones((cross_modal_image.size(0), cross_modal_image.size(1)), dtype=torch.long, device=input_ids.device) extend_image_masks = self.text_model.get_extended_attention_mask(pixel_mask, pixel_mask.size()).to(input_ids.device) layer_outputs_text = self.cross_modal_text_layers[0](cross_modal_text, cross_modal_image, attention_mask=extend_text_masks, encoder_attention_mask=extend_image_masks, output_attentions=output_attentions) cross_text_features = layer_outputs_text[0] layer_outputs_image = self.cross_modal_image_layers[0](cross_modal_image, cross_modal_text, attention_mask=extend_image_masks, encoder_attention_mask=extend_text_masks, output_attentions=output_attentions) cross_image_features = layer_outputs_image[0] if output_hidden_states: 
all_hidden_states_cross += ((cross_text_features, cross_image_features),) if output_attentions: all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),) link_layer_index = 0 for i in range(split_index, len(self.text_model.encoder.layer)): text_embeds = self.text_model.encoder.layer[i](text_embeds, extend_text_masks)[0] image_embeds = self.vision_model.visual.transformer.resblocks[i](image_embeds).type(self.vision_model.dtype) image_embeds_with_ln = self.cross_modal_image_transform(self.vision_model.visual.forward_post(image_embeds)) + image_token_type_embeddings text_link_tower = self.cross_modal_text_link_tower[link_layer_index] image_link_tower = self.cross_modal_image_link_tower[link_layer_index] cross_text_features_ = text_link_tower(self.cross_modal_text_transform(text_embeds) + text_token_type_embeddings, cross_text_features, extend_text_masks) cross_image_features_ = image_link_tower(image_embeds_with_ln, cross_image_features, extend_image_masks) layer_outputs_text = self.cross_modal_text_layers[link_layer_index + 1](cross_text_features_, cross_image_features_, attention_mask=extend_text_masks, encoder_attention_mask=extend_image_masks, output_attentions=output_attentions) cross_text_features = layer_outputs_text[0] layer_outputs_image = self.cross_modal_image_layers[link_layer_index + 1](cross_image_features_, cross_text_features_, attention_mask=extend_image_masks, encoder_attention_mask=extend_text_masks, output_attentions=output_attentions) cross_image_features = layer_outputs_image[0] link_layer_index += 1 if output_hidden_states: all_hidden_states_text += (text_embeds,) all_hidden_states_image += (image_embeds,) all_hidden_states_cross += ((cross_text_features, cross_image_features),) if output_attentions: all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),) (text_features, image_features) = (cross_text_features, cross_image_features) cls_features = self.get_cls_features(text_features, image_features) if output_hidden_states: all_hidden_states = (all_hidden_states_text, all_hidden_states_image, all_hidden_states_cross) if not return_dict: return tuple((v for v in [text_features, image_features, cls_features, all_hidden_states, all_self_attentions] if v is not None)) return BridgeTowerModelOutput(text_features=text_features, image_features=image_features, pooler_output=cls_features, hidden_states=all_hidden_states, attentions=all_self_attentions) def get_cls_features(self, text_features, image_features): cls_features_text = self.cross_modal_text_pooler(text_features) cls_features_image = self.cross_modal_image_pooler(image_features) return torch.cat([cls_features_text, cls_features_image], dim=-1) class BridgeTowerPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BridgeTowerMLMHead(nn.Module): def __init__(self, config, weight=None): super().__init__() self.config = config self.transform = BridgeTowerPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.text_config.vocab_size, bias=False) self.bias = 
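# Arithmetic sketch of the tower split in BridgeTowerModel.forward above: with the default
# 12-layer text encoder and config.num_hidden_layers = 6 cross-modal layers, split_index is
# 12 - 6 + 1 = 7. The first 7 text layers (and the first 7 vision blocks) run unimodally, the
# first cross-modal layer runs once, and the remaining 5 unimodal layers are interleaved with
# cross-modal layers 1..5 through the 5 link towers (num_hidden_layers - 1 of them).
num_text_layers, num_cross_modal_layers = 12, 6
split_index = num_text_layers - num_cross_modal_layers + 1
print(split_index, num_text_layers - split_index, num_cross_modal_layers - 1)  # 7 5 5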
nn.Parameter(torch.zeros(config.text_config.vocab_size)) if weight is not None: self.decoder.weight = weight def forward(self, x): mlm_score = self.transform(x) mlm_score = self.decoder(mlm_score) + self.bias return mlm_score class BridgeTowerITMHead(nn.Module): def __init__(self, hidden_size): super().__init__() self.fc = nn.Linear(hidden_size, 2) def forward(self, x): itm_score = self.fc(x) return itm_score @add_start_docstrings('\n BridgeTower Model with a language modeling head on top as done during pretraining.\n ', BRIDGETOWER_START_DOCSTRING) class BridgeTowerForMaskedLM(BridgeTowerPreTrainedModel): _tied_weights_keys = ['mlm_score.decoder.weight'] def __init__(self, config): super().__init__(config) self.bridgetower = BridgeTowerModel(config) self.mlm_score = BridgeTowerMLMHead(config) self.post_init() def get_output_embeddings(self): return self.mlm_score.decoder def set_output_embeddings(self, new_embeddings): self.mlm_score.decoder = new_embeddings @add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bridgetower(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, image_embeds=image_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) mlm_logits = self.mlm_score(outputs.text_features if return_dict else outputs[0]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(mlm_logits.device) masked_lm_loss = loss_fct(mlm_logits.view(-1, self.config.text_config.vocab_size), labels.view(-1)) if not return_dict: output = tuple(mlm_logits) return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=mlm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n BridgeTower Model transformer with a classifier head on top (a linear layer on top of the final hidden state of the\n [CLS] token) for image-to-text matching.\n ', BRIDGETOWER_START_DOCSTRING) class BridgeTowerForImageAndTextRetrieval(BridgeTowerPreTrainedModel): def __init__(self, config): super().__init__(config) self.bridgetower = BridgeTowerModel(config) self.itm_score = BridgeTowerITMHead(config.hidden_size * 2) self.post_init() @add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: 
Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bridgetower(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, image_embeds=image_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooler_output = outputs.pooler_output if return_dict else outputs[2] logits = self.itm_score(pooler_output) itm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(logits.device) itm_loss = loss_fct(logits, labels) if not return_dict: output = tuple(logits) return (itm_loss,) + output if itm_loss is not None else output return SequenceClassifierOutput(loss=itm_loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class BridgeTowerContrastiveHead(nn.Module): def __init__(self, hidden_size, embed_size): super().__init__() self.fc = nn.Linear(hidden_size, embed_size) def forward(self, x): x = self.fc(x) return x @add_start_docstrings('\n BridgeTower Model with a image-text contrastive head on top computing image-text contrastive loss.\n ', BRIDGETOWER_START_DOCSTRING) class BridgeTowerForContrastiveLearning(BridgeTowerPreTrainedModel): def __init__(self, config): super().__init__(config) self.bridgetower = BridgeTowerModel(config) self.itc_text_head = BridgeTowerContrastiveHead(config.hidden_size, config.contrastive_hidden_size) self.itc_image_head = BridgeTowerContrastiveHead(config.hidden_size, config.contrastive_hidden_size) self.itc_cross_modal_head = BridgeTowerContrastiveHead(config.hidden_size * 2, config.contrastive_hidden_size) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.post_init() @add_start_docstrings_to_model_forward(BRIDGETOWER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BridgeTowerContrastiveOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=True, return_dict: Optional[bool]=None, return_loss: Optional[bool]=None) -> Union[BridgeTowerContrastiveOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bridgetower(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, image_embeds=image_embeds, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict) pooler_output = outputs.pooler_output if return_dict else outputs[2] (hidden_states_txt, 
hidden_states_img, hidden_states_cross_modal) = outputs.hidden_states if return_dict else outputs[3] text_embeds = hidden_states_txt[-1] image_embeds = hidden_states_img[-1] image_embeds_with_ln = self.bridgetower.vision_model.visual.forward_post(image_embeds) image_token_type_embeddings = self.bridgetower.token_type_embeddings(torch.full((1,), 1, dtype=torch.long, device=self.bridgetower.token_type_embeddings.weight.device)).expand_as(image_embeds_with_ln) image_embeds = self.bridgetower.cross_modal_image_transform(image_embeds_with_ln) + image_token_type_embeddings text_embeds = nn.functional.normalize(self.itc_text_head(text_embeds[:, 0, :]), dim=-1, p=2) image_embeds = nn.functional.normalize(self.itc_image_head(image_embeds[:, 0, :]), dim=-1, p=2).to(device=text_embeds.device) cross_embeds = nn.functional.normalize(self.itc_cross_modal_head(pooler_output), dim=-1, p=2).to(device=text_embeds.device) logits = torch.stack([text_embeds, image_embeds, cross_embeds], dim=-2) logit_scale = self.logit_scale.exp().to(device=text_embeds.device) logits_text_to_image = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_text_to_cross = torch.matmul(text_embeds, cross_embeds.t()) * logit_scale logits_image_to_cross = torch.matmul(image_embeds, cross_embeds.t()) * logit_scale itc_loss = None if return_loss: labels = torch.arange(len(logits), device=logits.device) text_to_image_loss = nn.functional.cross_entropy(logits_text_to_image, labels) text_to_cross_loss = nn.functional.cross_entropy(logits_text_to_cross, labels) image_to_cross_loss = nn.functional.cross_entropy(logits_image_to_cross, labels) itc_loss = (text_to_image_loss + text_to_cross_loss + image_to_cross_loss) / 3.0 if not return_dict: output = (logits, text_embeds, image_embeds, cross_embeds) + outputs[3:] return (itc_loss,) + output if itc_loss is not None else output return BridgeTowerContrastiveOutput(loss=itc_loss, logits=logits, text_embeds=text_embeds, image_embeds=image_embeds, cross_embeds=cross_embeds, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/bridgetower/processing_bridgetower.py """""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class BridgeTowerProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] image_processor_class = 'BridgeTowerImageProcessor' tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding: encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, 
pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs) encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs) encoding.update(encoding_image_processor) return encoding def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) # File: transformers-main/src/transformers/models/bros/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = {'configuration_bros': ['BrosConfig']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['processing_bros'] = ['BrosProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_bros'] = ['BrosPreTrainedModel', 'BrosModel', 'BrosForTokenClassification', 'BrosSpadeEEForTokenClassification', 'BrosSpadeELForTokenClassification'] if TYPE_CHECKING: from .configuration_bros import BrosConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .processing_bros import BrosProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bros import BrosForTokenClassification, BrosModel, BrosPreTrainedModel, BrosSpadeEEForTokenClassification, BrosSpadeELForTokenClassification else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/bros/configuration_bros.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class BrosConfig(PretrainedConfig): model_type = 'bros' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, dim_bbox=8, bbox_scale=100.0, n_relations=1, classifier_dropout_prob=0.1, **kwargs): super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, **kwargs) self.dim_bbox = dim_bbox 
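# With the defaults in this signature (hidden_size=768, num_attention_heads=12, dim_bbox=8), the derived
# sizes computed below work out to dim_bbox_sinusoid_emb_2d = 768 // 4 = 192,
# dim_bbox_sinusoid_emb_1d = 192 // 8 = 24 and dim_bbox_projection = 768 // 12 = 64:
# each of the 8 box coordinates receives a 24-dim sinusoidal embedding, and the concatenated
# 192-dim vector is later projected down to one attention-head size by BrosBboxEmbeddings.bbox_projection.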
self.bbox_scale = bbox_scale self.n_relations = n_relations self.dim_bbox_sinusoid_emb_2d = self.hidden_size // 4 self.dim_bbox_sinusoid_emb_1d = self.dim_bbox_sinusoid_emb_2d // self.dim_bbox self.dim_bbox_projection = self.hidden_size // self.num_attention_heads self.classifier_dropout_prob = classifier_dropout_prob # File: transformers-main/src/transformers/models/bros/convert_bros_to_pytorch.py """""" import argparse import bros import torch from transformers import BrosConfig, BrosModel, BrosProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_configs(model_name): bros_config = BrosConfig.from_pretrained(model_name) return bros_config def remove_ignore_keys_(state_dict): ignore_keys = ['embeddings.bbox_sinusoid_emb.inv_freq'] for k in ignore_keys: state_dict.pop(k, None) def rename_key(name): if name == 'embeddings.bbox_projection.weight': name = 'bbox_embeddings.bbox_projection.weight' if name == 'embeddings.bbox_sinusoid_emb.x_pos_emb.inv_freq': name = 'bbox_embeddings.bbox_sinusoid_emb.x_pos_emb.inv_freq' if name == 'embeddings.bbox_sinusoid_emb.y_pos_emb.inv_freq': name = 'bbox_embeddings.bbox_sinusoid_emb.y_pos_emb.inv_freq' return name def convert_state_dict(orig_state_dict, model): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) orig_state_dict[rename_key(key)] = val remove_ignore_keys_(orig_state_dict) return orig_state_dict def convert_bros_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): original_model = bros.BrosModel.from_pretrained(model_name).eval() bros_config = get_configs(model_name) model = BrosModel.from_pretrained(model_name, config=bros_config) model.eval() state_dict = original_model.state_dict() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) bbox = torch.tensor([[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.4396, 0.672, 0.4659, 0.672, 0.4659, 0.685, 0.4396, 0.685], [0.4698, 0.672, 0.4843, 0.672, 0.4843, 0.685, 0.4698, 0.685], [0.4698, 0.672, 0.4843, 0.672, 0.4843, 0.685, 0.4698, 0.685], [0.2047, 0.687, 0.273, 0.687, 0.273, 0.7, 0.2047, 0.7], [0.2047, 0.687, 0.273, 0.687, 0.273, 0.7, 0.2047, 0.7], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]]) processor = BrosProcessor.from_pretrained(model_name) encoding = processor('His name is Rocco.', return_tensors='pt') encoding['bbox'] = bbox original_hidden_states = original_model(**encoding).last_hidden_state last_hidden_states = model(**encoding).last_hidden_state assert torch.allclose(original_hidden_states, last_hidden_states, atol=0.0001) if pytorch_dump_folder_path is not None: print(f'Saving model and processor to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub('jinho8345/' + model_name.split('/')[-1], commit_message='Update model') processor.push_to_hub('jinho8345/' + model_name.split('/')[-1], commit_message='Update model') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='jinho8345/bros-base-uncased', required=False, type=str, help="Name of the original model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, required=False, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub.') args = 
parser.parse_args() convert_bros_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/bros/modeling_bros.py """""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_bros import BrosConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'jinho8345/bros-base-uncased' _CONFIG_FOR_DOC = 'BrosConfig' BROS_START_DOCSTRING = '\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`BrosConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' BROS_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`BrosProcessor`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n\n bbox ('torch.FloatTensor' of shape '(batch_size, num_boxes, 4)'):\n Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values\n (x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the\n bounding box.\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n bbox_first_token_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n" @dataclass class BrosSpadeOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None initial_token_logits: torch.FloatTensor = None subsequent_token_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class BrosPositionalEmbedding1D(nn.Module): def __init__(self, config): super(BrosPositionalEmbedding1D, self).__init__() self.dim_bbox_sinusoid_emb_1d = config.dim_bbox_sinusoid_emb_1d inv_freq = 1 / 10000 ** (torch.arange(0.0, self.dim_bbox_sinusoid_emb_1d, 2.0) / self.dim_bbox_sinusoid_emb_1d) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq: torch.Tensor) -> torch.Tensor: seq_size = pos_seq.size() (b1, b2, b3) = seq_size sinusoid_inp = pos_seq.view(b1, b2, b3, 1) * self.inv_freq.view(1, 1, 1, self.dim_bbox_sinusoid_emb_1d // 2) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) return pos_emb class BrosPositionalEmbedding2D(nn.Module): def __init__(self, config): super(BrosPositionalEmbedding2D, self).__init__() self.dim_bbox = config.dim_bbox self.x_pos_emb = BrosPositionalEmbedding1D(config) self.y_pos_emb = BrosPositionalEmbedding1D(config) def forward(self, bbox: torch.Tensor) -> torch.Tensor: stack = [] for i in range(self.dim_bbox): if i % 2 == 0: stack.append(self.x_pos_emb(bbox[..., i])) else: stack.append(self.y_pos_emb(bbox[..., i])) bbox_pos_emb = torch.cat(stack, dim=-1) return bbox_pos_emb class BrosBboxEmbeddings(nn.Module): def __init__(self, config): super(BrosBboxEmbeddings, self).__init__() self.bbox_sinusoid_emb = BrosPositionalEmbedding2D(config) self.bbox_projection = nn.Linear(config.dim_bbox_sinusoid_emb_2d, config.dim_bbox_projection, bias=False) def forward(self, bbox: torch.Tensor): bbox_t = bbox.transpose(0, 1) bbox_pos = bbox_t[None, :, :, :] - bbox_t[:, None, :, :] bbox_pos_emb = self.bbox_sinusoid_emb(bbox_pos) bbox_pos_emb = self.bbox_projection(bbox_pos_emb) return bbox_pos_emb class BrosTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', 
torch.arange(config.max_position_embeddings).expand((1, -1))) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False) def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BrosSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[torch.Tensor]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: 
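# Cross-attention without a cached key/value: project the encoder hidden states into keys and values,
# and use the encoder's attention mask in place of the self-attention mask.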
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key (batch_size, n_head, seq_length, d_head) = query_layer.shape bbox_pos_emb = bbox_pos_emb.view(seq_length, seq_length, batch_size, d_head) bbox_pos_emb = bbox_pos_emb.permute([2, 0, 1, 3]) bbox_pos_scores = torch.einsum('bnid,bijd->bnij', (query_layer, bbox_pos_emb)) attention_scores = attention_scores + bbox_pos_scores attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BrosSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class 
BrosAttention(nn.Module): def __init__(self, config): super().__init__() self.self = BrosSelfAttention(config) self.output = BrosSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states=hidden_states, bbox_pos_emb=bbox_pos_emb, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class BrosIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BrosOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BrosLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BrosAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise Exception(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = BrosAttention(config) self.intermediate = BrosIntermediate(config) self.output = BrosOutput(config) def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: 
Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, bbox_pos_emb=bbox_pos_emb, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise Exception(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BrosEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BrosLayer(config) for _ in range(config.num_hidden_layers)]) def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, 'gradient_checkpointing', False) and self.training: if use_cache: logger.warning('`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. 
Setting `use_cache=False`...') use_cache = False layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, bbox_pos_emb, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions) else: layer_outputs = layer_module(hidden_states=hidden_states, bbox_pos_emb=bbox_pos_emb, attention_mask=attention_mask, head_mask=layer_head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class BrosPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BrosRelationExtractor(nn.Module): def __init__(self, config): super().__init__() self.n_relations = config.n_relations self.backbone_hidden_size = config.hidden_size self.head_hidden_size = config.hidden_size self.classifier_dropout_prob = config.classifier_dropout_prob self.drop = nn.Dropout(self.classifier_dropout_prob) self.query = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size) self.key = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size) self.dummy_node = nn.Parameter(torch.zeros(1, self.backbone_hidden_size)) def forward(self, query_layer: torch.Tensor, key_layer: torch.Tensor): query_layer = self.query(self.drop(query_layer)) dummy_vec = self.dummy_node.unsqueeze(0).repeat(1, key_layer.size(1), 1) key_layer = torch.cat([key_layer, dummy_vec], axis=0) key_layer = self.key(self.drop(key_layer)) query_layer = query_layer.view(query_layer.size(0), query_layer.size(1), self.n_relations, self.head_hidden_size) key_layer = key_layer.view(key_layer.size(0), key_layer.size(1), self.n_relations, self.head_hidden_size) relation_score = torch.matmul(query_layer.permute(2, 1, 0, 3), key_layer.permute(2, 1, 3, 0)) return relation_score class BrosPreTrainedModel(PreTrainedModel): config_class = BrosConfig base_model_prefix = 'bros' def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @add_start_docstrings('The bare Bros Model transformer outputting raw 
hidden-states without any specific head on top.', BROS_START_DOCSTRING) class BrosModel(BrosPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BrosTextEmbeddings(config) self.bbox_embeddings = BrosBboxEmbeddings(config) self.encoder = BrosEncoder(config) self.pooler = BrosPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if bbox is None: raise ValueError('You have to specify bbox') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = 
torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) if bbox.shape[-1] == 4: bbox = bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]] scaled_bbox = bbox * self.config.bbox_scale bbox_position_embeddings = self.bbox_embeddings(scaled_bbox) encoder_outputs = self.encoder(embedding_output, bbox_pos_emb=bbox_position_embeddings, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('\n Bros Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', BROS_START_DOCSTRING) class BrosForTokenClassification(BrosPreTrainedModel): _keys_to_ignore_on_load_unexpected = ['pooler'] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bros = BrosModel(config) classifier_dropout = config.classifier_dropout if hasattr(config, 'classifier_dropout') else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, bbox_first_token_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bros(input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() if bbox_first_token_mask is not 
None: bbox_first_token_mask = bbox_first_token_mask.view(-1) loss = loss_fct(logits.view(-1, self.num_labels)[bbox_first_token_mask], labels.view(-1)[bbox_first_token_mask]) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bros Model with a token classification head on top (initial_token_layers and subsequent_token_layer on top of the\n hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. The initial_token_classifier is used to\n predict the first token of each entity, and the subsequent_token_classifier is used to predict the subsequent\n tokens within an entity. Compared to BrosForTokenClassification, this model is more robust to serialization errors\n since it predicts next token from one token.\n ', BROS_START_DOCSTRING) class BrosSpadeEEForTokenClassification(BrosPreTrainedModel): _keys_to_ignore_on_load_unexpected = ['pooler'] def __init__(self, config): super().__init__(config) self.config = config self.num_labels = config.num_labels self.n_relations = config.n_relations self.backbone_hidden_size = config.hidden_size self.bros = BrosModel(config) classifier_dropout = config.classifier_dropout if hasattr(config, 'classifier_dropout') else config.hidden_dropout_prob self.initial_token_classifier = nn.Sequential(nn.Dropout(classifier_dropout), nn.Linear(config.hidden_size, config.hidden_size), nn.Dropout(classifier_dropout), nn.Linear(config.hidden_size, config.num_labels)) self.subsequent_token_classifier = BrosRelationExtractor(config) self.init_weights() @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=BrosSpadeOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, bbox_first_token_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, initial_token_labels: Optional[torch.Tensor]=None, subsequent_token_labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BrosSpadeOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bros(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_states = outputs[0] last_hidden_states = last_hidden_states.transpose(0, 1).contiguous() initial_token_logits = self.initial_token_classifier(last_hidden_states).transpose(0, 1).contiguous() subsequent_token_logits = self.subsequent_token_classifier(last_hidden_states, last_hidden_states).squeeze(0) inv_attention_mask = 1 - attention_mask (batch_size, max_seq_length) = inv_attention_mask.shape device = inv_attention_mask.device invalid_token_mask = torch.cat([inv_attention_mask, torch.zeros([batch_size, 1]).to(device)], axis=1).bool() 
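# subsequent_token_logits has one column per input position plus a final dummy column (the dummy_node key
# appended inside BrosRelationExtractor) that acts as the "no subsequent token" target. Padding columns
# (invalid_token_mask) and the diagonal self-link positions (self_token_mask below) are filled with the
# dtype minimum so they can never be predicted.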
subsequent_token_logits = subsequent_token_logits.masked_fill(invalid_token_mask[:, None, :], torch.finfo(subsequent_token_logits.dtype).min) self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool() subsequent_token_logits = subsequent_token_logits.masked_fill(self_token_mask[None, :, :], torch.finfo(subsequent_token_logits.dtype).min) subsequent_token_mask = attention_mask.view(-1).bool() loss = None if initial_token_labels is not None and subsequent_token_labels is not None: loss_fct = CrossEntropyLoss() initial_token_labels = initial_token_labels.view(-1) if bbox_first_token_mask is not None: bbox_first_token_mask = bbox_first_token_mask.view(-1) initial_token_loss = loss_fct(initial_token_logits.view(-1, self.num_labels)[bbox_first_token_mask], initial_token_labels[bbox_first_token_mask]) else: initial_token_loss = loss_fct(initial_token_logits.view(-1, self.num_labels), initial_token_labels) subsequent_token_labels = subsequent_token_labels.view(-1) subsequent_token_loss = loss_fct(subsequent_token_logits.view(-1, max_seq_length + 1)[subsequent_token_mask], subsequent_token_labels[subsequent_token_mask]) loss = initial_token_loss + subsequent_token_loss if not return_dict: output = (initial_token_logits, subsequent_token_logits) + outputs[2:] return (loss,) + output if loss is not None else output return BrosSpadeOutput(loss=loss, initial_token_logits=initial_token_logits, subsequent_token_logits=subsequent_token_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bros Model with a token classification head on top (a entity_linker layer on top of the hidden-states output) e.g.\n for Entity-Linking. The entity_linker is used to predict intra-entity links (one entity to another entity).\n ', BROS_START_DOCSTRING) class BrosSpadeELForTokenClassification(BrosPreTrainedModel): _keys_to_ignore_on_load_unexpected = ['pooler'] def __init__(self, config): super().__init__(config) self.config = config self.num_labels = config.num_labels self.n_relations = config.n_relations self.backbone_hidden_size = config.hidden_size self.bros = BrosModel(config) config.classifier_dropout if hasattr(config, 'classifier_dropout') else config.hidden_dropout_prob self.entity_linker = BrosRelationExtractor(config) self.init_weights() @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, bbox_first_token_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bros(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_states = outputs[0] last_hidden_states = last_hidden_states.transpose(0, 
1).contiguous() logits = self.entity_linker(last_hidden_states, last_hidden_states).squeeze(0) loss = None if labels is not None: loss_fct = CrossEntropyLoss() (batch_size, max_seq_length) = attention_mask.shape device = attention_mask.device self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool() mask = bbox_first_token_mask.view(-1) bbox_first_token_mask = torch.cat([~bbox_first_token_mask, torch.zeros([batch_size, 1], dtype=torch.bool).to(device)], axis=1) logits = logits.masked_fill(bbox_first_token_mask[:, None, :], torch.finfo(logits.dtype).min) logits = logits.masked_fill(self_token_mask[None, :, :], torch.finfo(logits.dtype).min) loss = loss_fct(logits.view(-1, max_seq_length + 1)[mask], labels.view(-1)[mask]) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/bros/processing_bros.py """""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class BrosProcessor(ProcessorMixin): attributes = ['tokenizer'] tokenizer_class = ('BertTokenizer', 'BertTokenizerFast') def __init__(self, tokenizer=None, **kwargs): if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(tokenizer) def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding: encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs) return encoding def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names return list(dict.fromkeys(tokenizer_input_names)) # File: transformers-main/src/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py """""" import argparse from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5 from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): config = T5Config.from_json_file(config_file) print(f'Building PyTorch model 
from configuration: {config}') model = T5ForConditionalGeneration(config) load_tf_weights_in_t5(model, config, tf_checkpoint_path) print(f'Save PyTorch model to {pytorch_dump_path}') model.save_pretrained(pytorch_dump_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.') parser.add_argument('--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.') parser.add_argument('--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) # File: transformers-main/src/transformers/models/byt5/tokenization_byt5.py """""" import warnings from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) class ByT5Tokenizer(PreTrainedTokenizer): model_input_names = ['input_ids', 'attention_mask'] def __init__(self, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=125, additional_special_tokens=None, **kwargs) -> None: if extra_ids > 0 and additional_special_tokens is None: additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)] elif extra_ids > 0 and additional_special_tokens is not None and (len(additional_special_tokens) > 0): extra_tokens = len(set(filter(lambda x: bool('extra_id' in str(x)), additional_special_tokens))) if extra_tokens != extra_ids: raise ValueError(f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to ByT5Tokenizer. In this case the additional_special_tokens must include the extra_ids tokens') pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token} self.offset = len(self._added_tokens_decoder) self._utf_vocab_size = 2 ** 8 super().__init__(eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=0, additional_special_tokens=additional_special_tokens, **kwargs) @property def vocab_size(self): return self._utf_vocab_size def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)} vocab.update(self.added_tokens_encoder) return vocab def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [0] * len(token_ids_0) + [1] return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]: if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn(f'This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated eos tokens being added.') return token_ids else: return token_ids + [self.eos_token_id] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + eos + token_ids_1 + eos) * [0] def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: token_ids_0 = self._add_eos_if_not_present(token_ids_0) if token_ids_1 is None: return token_ids_0 else: token_ids_1 = self._add_eos_if_not_present(token_ids_1) return token_ids_0 + token_ids_1 def _tokenize(self, text: str) -> List[str]: tokens = [chr(i) for i in text.encode('utf-8')] return tokens def _convert_token_to_id(self, token): if len(token) != 1: token_id = None else: token_id = ord(token) + self.offset return token_id def _convert_id_to_token(self, index): token = chr(index - self.offset) return token def convert_tokens_to_string(self, tokens): bstring = b'' for token in tokens: if token in self.added_tokens_decoder: tok_string = self.added_tokens_decoder[token].encode('utf-8') elif token in self.added_tokens_encoder: tok_string = token.encode('utf-8') else: tok_string = bytes([ord(token)]) bstring += tok_string string = bstring.decode('utf-8', errors='ignore') return string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: return () # File: transformers-main/src/transformers/models/camembert/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_camembert': ['CamembertConfig', 'CamembertOnnxConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_camembert'] = ['CamembertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_camembert_fast'] = ['CamembertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_camembert'] = ['CamembertForCausalLM', 'CamembertForMaskedLM', 'CamembertForMultipleChoice', 'CamembertForQuestionAnswering', 'CamembertForSequenceClassification', 'CamembertForTokenClassification', 'CamembertModel', 'CamembertPreTrainedModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_camembert'] = ['TFCamembertForCausalLM', 'TFCamembertForMaskedLM', 'TFCamembertForMultipleChoice', 'TFCamembertForQuestionAnswering', 'TFCamembertForSequenceClassification', 'TFCamembertForTokenClassification', 'TFCamembertModel', 'TFCamembertPreTrainedModel'] if TYPE_CHECKING: from .configuration_camembert import CamembertConfig, CamembertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_camembert import CamembertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_camembert_fast import 
CamembertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_camembert import CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, CamembertForQuestionAnswering, CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, CamembertPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_camembert import TFCamembertForCausalLM, TFCamembertForMaskedLM, TFCamembertForMultipleChoice, TFCamembertForQuestionAnswering, TFCamembertForSequenceClassification, TFCamembertForTokenClassification, TFCamembertModel, TFCamembertPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/camembert/configuration_camembert.py """""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class CamembertConfig(PretrainedConfig): model_type = 'camembert' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout class CamembertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)]) # File: transformers-main/src/transformers/models/camembert/modeling_camembert.py """""" import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import 
PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, get_torch_version, logging, replace_return_docstrings from .configuration_camembert import CamembertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'almanach/camembert-base' _CONFIG_FOR_DOC = 'CamembertConfig' CAMEMBERT_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`CamembertConfig`]): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' class CamembertEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return 
embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device) return position_ids.unsqueeze(0).expand(input_shape) class CamembertSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, 
device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class CamembertSdpaSelfAttention(CamembertSelfAttention): def __init__(self, config, position_embedding_type=None): super().__init__(config, position_embedding_type=position_embedding_type) self.dropout_prob = config.attention_probs_dropout_prob self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse('2.2.0') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: if self.position_embedding_type != 'absolute' or output_attentions or head_mask is not None: logger.warning_once('CamembertSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) (bsz, tgt_len, _) = hidden_states.size() query_layer = self.transpose_for_scores(self.query(hidden_states)) is_cross_attention = encoder_hidden_states is not None current_states = encoder_hidden_states if is_cross_attention else hidden_states attention_mask = encoder_attention_mask if is_cross_attention else attention_mask if is_cross_attention and past_key_value and (past_key_value[0].shape[2] == current_states.shape[1]): (key_layer, value_layer) = past_key_value else: key_layer = self.transpose_for_scores(self.key(current_states)) value_layer = self.transpose_for_scores(self.value(current_states)) if past_key_value is not None and (not is_cross_attention): key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) if self.is_decoder: past_key_value = (key_layer, value_layer) if self.require_contiguous_qkv and query_layer.device.type == 'cuda' and (attention_mask is not None): query_layer = query_layer.contiguous() key_layer = key_layer.contiguous() value_layer = value_layer.contiguous() is_causal = True if self.is_decoder and (not is_cross_attention) and (attention_mask is None) and (tgt_len > 1) else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=self.dropout_prob if self.training else 0.0, is_causal=is_causal) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.all_head_size) outputs = (attn_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class CamembertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states CAMEMBERT_SELF_ATTENTION_CLASSES = {'eager': CamembertSelfAttention, 'sdpa': CamembertSdpaSelfAttention} class CamembertAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = CAMEMBERT_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = CamembertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: 
Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class CamembertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class CamembertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class CamembertLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = CamembertAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = CamembertAttention(config, position_embedding_type='absolute') self.intermediate = CamembertIntermediate(config) self.output = CamembertOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, 
encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class CamembertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([CamembertLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class CamembertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class CamembertPreTrainedModel(PreTrainedModel): config_class = CamembertConfig base_model_prefix = 'roberta' supports_gradient_checkpointing = True _supports_sdpa = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) CAMEMBERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" class CamembertClassificationHead(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x class CamembertLMHead(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) x = self.decoder(x) return x def _tie_weights(self): if self.decoder.bias.device.type == 'meta': self.decoder.bias = self.bias else: self.bias = self.decoder.bias @add_start_docstrings('The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.', CAMEMBERT_START_DOCSTRING) class CamembertModel(CamembertPreTrainedModel): _no_split_modules = [] def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = CamembertEmbeddings(config) self.encoder = CamembertEncoder(config) self.pooler = CamembertPooler(config) if add_pooling_layer else None self.attn_implementation = config._attn_implementation self.position_embedding_type = config.position_embedding_type self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in 
heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) use_sdpa_attention_masks = self.attn_implementation == 'sdpa' and self.position_embedding_type == 'absolute' and (head_mask is None) and (not output_attentions) if use_sdpa_attention_masks and attention_mask.dim() == 2: if self.config.is_decoder: extended_attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, embedding_output, past_key_values_length) else: extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, embedding_output.dtype, tgt_len=seq_length) else: extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, 
encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) if use_sdpa_attention_masks and encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, embedding_output.dtype, tgt_len=seq_length) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('CamemBERT Model with a `language modeling` head on top.', CAMEMBERT_START_DOCSTRING) class CamembertForMaskedLM(CamembertPreTrainedModel): _tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias'] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning('If you want to use `CamembertForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.') self.roberta = CamembertModel(config, add_pooling_layer=False) self.lm_head = CamembertLMHead(config) self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='', expected_output="' Paris'", expected_loss=0.1) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = 
self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: labels = labels.to(prediction_scores.device) loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ', CAMEMBERT_START_DOCSTRING) class CamembertForSequenceClassification(CamembertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.roberta = CamembertModel(config, add_pooling_layer=False) self.classifier = CamembertClassificationHead(config) self.post_init() @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='cardiffnlp/twitter-roberta-base-emotion', output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'optimism'", expected_loss=0.08) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', CAMEMBERT_START_DOCSTRING) class CamembertForMultipleChoice(CamembertPreTrainedModel): def __init__(self, config): super().__init__(config) self.roberta = CamembertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.roberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: labels = labels.to(reshaped_logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.\n for Named-Entity-Recognition (NER) tasks.\n ', CAMEMBERT_START_DOCSTRING) class CamembertForTokenClassification(CamembertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roberta = CamembertModel(config, add_pooling_layer=False) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='Jean-Baptiste/roberta-large-ner-english', 
output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']", expected_loss=0.01) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`\n ', CAMEMBERT_START_DOCSTRING) class CamembertForQuestionAnswering(CamembertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roberta = CamembertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='deepset/roberta-base-squad2', output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, expected_output="' puppet'", expected_loss=0.86) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() 
end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('CamemBERT Model with a `language modeling` head on top for CLM fine-tuning.', CAMEMBERT_START_DOCSTRING) class CamembertForCausalLM(CamembertPreTrainedModel): _tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias'] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning('If you want to use `CamembertLMHeadModel` as a standalone, add `is_decoder=True.`') self.roberta = CamembertModel(config, add_pooling_layer=False) self.lm_head = CamembertLMHead(config) self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Tuple[Tuple[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) lm_loss = None if labels is not None: labels = labels.to(prediction_scores.device) shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = 
(prediction_scores,) + outputs[2:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx # File: transformers-main/src/transformers/models/camembert/modeling_tf_camembert.py """""" from __future__ import annotations import math import warnings from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPoolingAndCrossAttentions, TFCausalLMOutputWithCrossAttentions, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_camembert import CamembertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'almanach/camembert-base' _CONFIG_FOR_DOC = 'CamembertConfig' CAMEMBERT_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. 
Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Parameters:\n config ([`CamembertConfig`]): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' CAMEMBERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" class TFCamembertEmbeddings(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.padding_idx = 1 self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope('word_embeddings'): self.weight = self.add_weight(name='weight', shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)) with tf.name_scope('token_type_embeddings'): self.token_type_embeddings = self.add_weight(name='embeddings', shape=[self.config.type_vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)) with tf.name_scope('position_embeddings'): self.position_embeddings = self.add_weight(name='embeddings', shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range)) if self.built: return self.built = True if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0): mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype) incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask return incremental_indices + self.padding_idx def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, past_key_values_length=0, training=False): assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: if input_ids is not None: position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids, past_key_values_length=past_key_values_length) else: position_ids = tf.expand_dims(tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) 
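# Worked example of create_position_ids_from_input_ids used above (toy values chosen for illustration, assuming the padding_idx of 1 set in __init__ and past_key_values_length == 0):
#   input_ids            = [[5, 6, 7, 1, 1]]   # 1 is the padding token id
#   mask                 = [[1, 1, 1, 0, 0]]
#   cumsum(mask) * mask  = [[1, 2, 3, 0, 0]]
#   result + padding_idx = [[2, 3, 4, 1, 1]]
# Real tokens thus get positions starting at padding_idx + 1 while padding positions stay at padding_idx, following the fairseq/RoBERTa position-numbering convention.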
final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFCamembertPooler(keras.layers.Layer): def __init__(self, config: CamembertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation='tanh', name='dense') self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFCamembertSelfAttention(keras.layers.Layer): def __init__(self, config: CamembertConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='query') self.key = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='key') self.value = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='value') self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if 
self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: attention_scores = tf.add(attention_scores, attention_mask) attention_probs = stable_softmax(logits=attention_scores, axis=-1) attention_probs = self.dropout(inputs=attention_probs, training=training) if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'query', None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, 'key', None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, 'value', None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) class TFCamembertSelfOutput(keras.layers.Layer): def __init__(self, config: CamembertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFCamembertAttention(keras.layers.Layer): def __init__(self, config: CamembertConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFCamembertSelfAttention(config, name='self') self.dense_output = TFCamembertSelfOutput(config, name='output') def prune_heads(self, heads): raise NotImplementedError def call(self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: self_outputs = self.self_attention(hidden_states=input_tensor, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training) attention_output = self.dense_output(hidden_states=self_outputs[0], input_tensor=input_tensor, training=training) 
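# Note: self_outputs comes from TFCamembertSelfAttention above, which computes scaled dot-product attention, i.e. softmax(Q @ K^T / sqrt(attention_head_size)) @ V, and returns (attention_output, attention_probs?, past_key_value?) depending on output_attentions and is_decoder.
# dense_output then applies the output projection, dropout, and a residual add + LayerNorm against input_tensor; attention_output has shape (batch_size, seq_length, hidden_size), where hidden_size == num_attention_heads * attention_head_size.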
outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attention', None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, 'dense_output', None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) class TFCamembertIntermediate(keras.layers.Layer): def __init__(self, config: CamembertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFCamembertOutput(keras.layers.Layer): def __init__(self, config: CamembertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFCamembertLayer(keras.layers.Layer): def __init__(self, config: CamembertConfig, **kwargs): super().__init__(**kwargs) self.attention = TFCamembertAttention(config, name='attention') self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = TFCamembertAttention(config, name='crossattention') self.intermediate = TFCamembertIntermediate(config, name='intermediate') self.bert_output = TFCamembertOutput(config, name='output') def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_value: Tuple[tf.Tensor] | None, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, 
past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = self.intermediate(hidden_states=attention_output) layer_output = self.bert_output(hidden_states=intermediate_output, input_tensor=attention_output, training=training) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'attention', None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, 'intermediate', None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, 'bert_output', None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) if getattr(self, 'crossattention', None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None) class TFCamembertEncoder(keras.layers.Layer): def __init__(self, config: CamembertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFCamembertLayer(config, name=f'layer_._{i}') for i in range(config.num_hidden_layers)] def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_values: Tuple[Tuple[tf.Tensor]] | None, use_cache: Optional[bool], output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training) hidden_states = layer_outputs[0] if use_cache: 
next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None)) return TFBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layer', None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFCamembertMainLayer(keras.layers.Layer): config_class = CamembertConfig def __init__(self, config, add_pooling_layer=True, **kwargs): super().__init__(**kwargs) self.config = config self.is_decoder = config.is_decoder self.num_hidden_layers = config.num_hidden_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.encoder = TFCamembertEncoder(config, name='encoder') self.pooler = TFCamembertPooler(config, name='pooler') if add_pooling_layer else None self.embeddings = TFCamembertEmbeddings(config, name='embeddings') def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: if not self.config.is_decoder: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape if past_key_values is None: past_key_values_length = 0 past_key_values = [None] * len(self.encoder.layer) else: past_key_values_length = shape_list(past_key_values[0][0])[-2] if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, 
token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, training=training) attention_mask_shape = shape_list(attention_mask) mask_seq_length = seq_length + past_key_values_length if self.is_decoder: seq_ids = tf.range(mask_seq_length) causal_mask = tf.less_equal(tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), seq_ids[None, :, None]) causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) extended_attention_mask = causal_mask * attention_mask[:, None, :] attention_mask_shape = shape_list(extended_attention_mask) extended_attention_mask = tf.reshape(extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])) if past_key_values[0] is not None: extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] else: extended_attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])) extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) if self.is_decoder and encoder_attention_mask is not None: encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder(hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'pooler', None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) class TFCamembertPreTrainedModel(TFPreTrainedModel): config_class = CamembertConfig base_model_prefix = 'roberta' @add_start_docstrings('The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.', 
CAMEMBERT_START_DOCSTRING) class TFCamembertModel(TFCamembertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta = TFCamembertMainLayer(config, name='roberta') @unpack_inputs @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[Tuple, TFBaseModelOutputWithPoolingAndCrossAttentions]: outputs = self.roberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'roberta', None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) class TFCamembertLMHead(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.dense = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm') self.act = get_tf_activation('gelu') self.decoder = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer='zeros', trainable=True, name='bias') if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) def get_output_embeddings(self): return self.decoder def set_output_embeddings(self, value): self.decoder.weight = value self.decoder.vocab_size = shape_list(value)[0] def get_bias(self): return {'bias': self.bias} def set_bias(self, value): self.bias = value['bias'] self.config.vocab_size = shape_list(value['bias'])[0] def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.layer_norm(hidden_states) seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, 
transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states @add_start_docstrings('CamemBERT Model with a `language modeling` head on top.', CAMEMBERT_START_DOCSTRING) class TFCamembertForMaskedLM(TFCamembertPreTrainedModel, TFMaskedLanguageModelingLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'lm_head.decoder.weight'] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name='roberta') self.lm_head = TFCamembertLMHead(config, self.roberta.embeddings, name='lm_head') def get_lm_head(self): return self.lm_head def get_prefix_bias_name(self): warnings.warn('The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.', FutureWarning) return self.name + '/' + self.lm_head.name @unpack_inputs @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='', expected_output="' Paris'", expected_loss=0.1) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return (loss,) + output if loss is not None else output return TFMaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'roberta', None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, 'lm_head', None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) class TFCamembertClassificationHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation='tanh', name='dense') classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = keras.layers.Dropout(classifier_dropout) self.out_proj = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='out_proj') self.config = config def call(self, features, training=False): x = features[:, 0, :] x = self.dropout(x, training=training) x = self.dense(x) x = self.dropout(x, 
training=training) x = self.out_proj(x) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'out_proj', None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ', CAMEMBERT_START_DOCSTRING) class TFCamembertForSequenceClassification(TFCamembertPreTrainedModel, TFSequenceClassificationLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'lm_head'] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name='roberta') self.classifier = TFCamembertClassificationHead(config, name='classifier') @unpack_inputs @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='cardiffnlp/twitter-roberta-base-emotion', output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'optimism'", expected_loss=0.08) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] logits = self.classifier(sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'roberta', None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings('\n CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.\n for Named-Entity-Recognition (NER) tasks.\n ', CAMEMBERT_START_DOCSTRING) class TFCamembertForTokenClassification(TFCamembertPreTrainedModel, TFTokenClassificationLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'lm_head'] _keys_to_ignore_on_load_missing = ['dropout'] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta = TFCamembertMainLayer(config, 
add_pooling_layer=False, name='roberta') classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = keras.layers.Dropout(classifier_dropout) self.classifier = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='ydshieh/roberta-large-ner-english', output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']", expected_loss=0.01) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'roberta', None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', CAMEMBERT_START_DOCSTRING) class TFCamembertForMultipleChoice(TFCamembertPreTrainedModel, TFMultipleChoiceLoss): _keys_to_ignore_on_load_unexpected = ['lm_head'] _keys_to_ignore_on_load_missing = ['dropout'] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta = TFCamembertMainLayer(config, name='roberta') self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = keras.layers.Dense(1, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None outputs = self.roberta(flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'roberta', None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', CAMEMBERT_START_DOCSTRING) class TFCamembertForQuestionAnswering(TFCamembertPreTrainedModel, TFQuestionAnsweringLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'lm_head'] def __init__(self, config, *inputs, 
**kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name='roberta') self.qa_outputs = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='ydshieh/roberta-base-squad2', output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, expected_output="' puppet'", expected_loss=0.86) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, start_positions: np.ndarray | tf.Tensor | None=None, end_positions: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {'start_position': start_positions} labels['end_position'] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (loss,) + output if loss is not None else output return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'roberta', None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, 'qa_outputs', None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) @add_start_docstrings('CamemBERT Model with a `language modeling` head on top for CLM fine-tuning.', CAMEMBERT_START_DOCSTRING) class TFCamembertForCausalLM(TFCamembertPreTrainedModel, TFCausalLanguageModelingLoss): _keys_to_ignore_on_load_unexpected = ['pooler', 'lm_head.decoder.weight'] def __init__(self, config: CamembertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if not config.is_decoder: logger.warning('If you want to use `TFCamembertLMHeadModel` as a standalone, add `is_decoder=True.`') self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name='roberta') self.lm_head = TFCamembertLMHead(config, input_embeddings=self.roberta.embeddings, name='lm_head') def get_lm_head(self): return self.lm_head def get_prefix_bias_name(self): warnings.warn('The method get_prefix_bias_name is deprecated. 
Please use `get_bias` instead.', FutureWarning) return self.name + '/' + self.lm_head.name def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = tf.ones(input_shape) if past_key_values is not None: input_ids = input_ids[:, -1:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values} @unpack_inputs @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: outputs = self.roberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] logits = self.lm_head(hidden_states=sequence_output, training=training) loss = None if labels is not None: shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels=labels, logits=shifted_logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFCausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'roberta', None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, 'lm_head', None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) # File: transformers-main/src/transformers/models/camembert/tokenization_camembert.py """""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'} SPIECE_UNDERLINE = '▁' class CamembertTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', 
additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED', '<unk>NOTUSED'], sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None: mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False, special=True) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file self._added_tokens_decoder = {0: AddedToken('<s>NOTUSED', special=True), 1: AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token, 2: AddedToken('</s>NOTUSED', special=True), 3: AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token, 4: AddedToken('<unk>NOTUSED', special=True)} self.fairseq_offset = 4 if 'added_tokens_decoder' in kwargs: kwargs['added_tokens_decoder'].update(self._added_tokens_decoder) super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs) @property def vocab_size(self): return len(self.sp_model) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.fairseq_offset)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): if self.sp_model.PieceToId(token) == 0: return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(token) def _convert_id_to_token(self, index): return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): current_sub_tokens = [] out_string = '' prev_is_special = False for token in tokens: if token in self.all_special_tokens: if not prev_is_special: out_string += ' ' out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None return state def __setstate__(self, d): self.__dict__ = d if not hasattr(self, 'sp_model_kwargs'): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + 
sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] # File: transformers-main/src/transformers/models/camembert/tokenization_camembert_fast.py """""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: CamembertTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} SPIECE_UNDERLINE = '▁' class CamembertTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = CamembertTokenizer def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED', '<unk>NOTUSED'], **kwargs): mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs) self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.') if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): 
copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) # File: transformers-main/src/transformers/models/canine/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = {'configuration_canine': ['CanineConfig'], 'tokenization_canine': ['CanineTokenizer']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_canine'] = ['CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine'] if TYPE_CHECKING: from .configuration_canine import CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/canine/configuration_canine.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class CanineConfig(PretrainedConfig): model_type = 'canine' def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=57344, eos_token_id=57345, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.downsampling_rate = downsampling_rate self.upsampling_kernel_size = upsampling_kernel_size self.num_hash_functions = num_hash_functions self.num_hash_buckets = num_hash_buckets self.local_transformer_stride = local_transformer_stride # File: transformers-main/src/transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py """""" import argparse from transformers import CanineConfig, CanineModel, CanineTokenizer, load_tf_weights_in_canine from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, pytorch_dump_path): config = CanineConfig() model = CanineModel(config) model.eval() print(f'Building PyTorch model from configuration: {config}') load_tf_weights_in_canine(model, config, tf_checkpoint_path) print(f'Save PyTorch model to {pytorch_dump_path}') 
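    # Write the converted weights, then save a default CanineTokenizer so the dump directory can be loaded directly with from_pretrained.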
model.save_pretrained(pytorch_dump_path) tokenizer = CanineTokenizer() print(f'Save tokenizer files to {pytorch_dump_path}') tokenizer.save_pretrained(pytorch_dump_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint. Should end with model.ckpt') parser.add_argument('--pytorch_dump_path', default=None, type=str, required=True, help='Path to a folder where the PyTorch model will be placed.') args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.pytorch_dump_path) # File: transformers-main/src/transformers/models/canine/modeling_canine.py """""" import copy import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, ModelOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_canine import CanineConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google/canine-s' _CONFIG_FOR_DOC = 'CanineConfig' _PRIMES = [31, 43, 59, 61, 73, 97, 103, 113, 137, 149, 157, 173, 181, 193, 211, 223] @dataclass class CanineModelOutputWithPooling(ModelOutput): last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None def load_tf_weights_in_canine(model, config, tf_checkpoint_path): try: import re import numpy as np import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}') array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for (name, array) in zip(names, arrays): name = name.split('/') if any((n in ['adam_v', 'adam_m', 'AdamWeightDecayOptimizer', 'AdamWeightDecayOptimizer_1', 'global_step', 'cls', 'autoregressive_decoder', 'char_output_weights'] for n in name)): logger.info(f"Skipping {'/'.join(name)}") continue if name[0] == 'bert': name[0] = 'encoder' elif name[1] == 'embeddings': name.remove(name[1]) elif name[1] == 'segment_embeddings': name[1] = 'token_type_embeddings' elif name[1] == 'initial_char_encoder': name = ['chars_to_molecules'] + name[-2:] elif name[0] == 'final_char_encoder' and name[1] in ['LayerNorm', 'conv']: name = ['projection'] + name[1:] pointer = model for m_name in name: if re.fullmatch('[A-Za-z]+_\\d+', m_name) and 'Embedder' not in m_name: scope_names = re.split('_(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'kernel' or scope_names[0] == 'gamma': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': pointer = getattr(pointer, 'bias') elif scope_names[0] == 'output_weights': pointer = getattr(pointer, 'weight') else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name[-10:] in [f'Embedder_{i}' for i in range(8)]: pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) if pointer.shape != array.shape: raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched') logger.info(f'Initialize PyTorch weight {name}') pointer.data = torch.from_numpy(array) return model class CanineEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.config = config shard_embedding_size = config.hidden_size // config.num_hash_functions for i in range(config.num_hash_functions): name = f'HashBucketCodepointEmbedder_{i}' setattr(self, name, nn.Embedding(config.num_hash_buckets, shard_embedding_size)) self.char_position_embeddings = nn.Embedding(config.num_hash_buckets, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') def _hash_bucket_tensors(self, input_ids, num_hashes: int, num_buckets: int): if num_hashes > len(_PRIMES): raise ValueError(f'`num_hashes` must be <= {len(_PRIMES)}') primes = _PRIMES[:num_hashes] result_tensors = [] for prime in primes: hashed = (input_ids + 1) * prime % num_buckets result_tensors.append(hashed) return result_tensors def _embed_hash_buckets(self, input_ids, embedding_size: int, num_hashes: int, num_buckets: int): if embedding_size % num_hashes != 0: raise ValueError(f'Expected 
`embedding_size` ({embedding_size}) % `num_hashes` ({num_hashes}) == 0') hash_bucket_tensors = self._hash_bucket_tensors(input_ids, num_hashes=num_hashes, num_buckets=num_buckets) embedding_shards = [] for (i, hash_bucket_ids) in enumerate(hash_bucket_tensors): name = f'HashBucketCodepointEmbedder_{i}' shard_embeddings = getattr(self, name)(hash_bucket_ids) embedding_shards.append(shard_embeddings) return torch.cat(embedding_shards, dim=-1) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.FloatTensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self._embed_hash_buckets(input_ids, self.config.hidden_size, self.config.num_hash_functions, self.config.num_hash_buckets) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.char_position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class CharactersToMolecules(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d(in_channels=config.hidden_size, out_channels=config.hidden_size, kernel_size=config.downsampling_rate, stride=config.downsampling_rate) self.activation = ACT2FN[config.hidden_act] self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, char_encoding: torch.Tensor) -> torch.Tensor: cls_encoding = char_encoding[:, 0:1, :] char_encoding = torch.transpose(char_encoding, 1, 2) downsampled = self.conv(char_encoding) downsampled = torch.transpose(downsampled, 1, 2) downsampled = self.activation(downsampled) downsampled_truncated = downsampled[:, 0:-1, :] result = torch.cat([cls_encoding, downsampled_truncated], dim=1) result = self.LayerNorm(result) return result class ConvProjection(nn.Module): def __init__(self, config): super().__init__() self.config = config self.conv = nn.Conv1d(in_channels=config.hidden_size * 2, out_channels=config.hidden_size, kernel_size=config.upsampling_kernel_size, stride=1) self.activation = ACT2FN[config.hidden_act] self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, inputs: torch.Tensor, final_seq_char_positions: Optional[torch.Tensor]=None) -> torch.Tensor: inputs = torch.transpose(inputs, 1, 2) pad_total = self.config.upsampling_kernel_size - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg pad = nn.ConstantPad1d((pad_beg, pad_end), 0) result = self.conv(pad(inputs)) result = torch.transpose(result, 1, 2) result = self.activation(result) result = self.LayerNorm(result) result = self.dropout(result) final_char_seq = result if final_seq_char_positions is not None: raise NotImplementedError('CanineForMaskedLM is currently not supported') else: query_seq = final_char_seq return query_seq class CanineSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 
0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, from_tensor: torch.Tensor, to_tensor: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: mixed_query_layer = self.query(from_tensor) key_layer = self.transpose_for_scores(self.key(to_tensor)) value_layer = self.transpose_for_scores(self.value(to_tensor)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = from_tensor.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: if attention_mask.ndim == 3: attention_mask = torch.unsqueeze(attention_mask, dim=1) attention_mask = (1.0 - attention_mask.float()) * torch.finfo(attention_scores.dtype).min attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) 
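        # Merge the attention heads back together: (batch, seq_len, num_heads, head_size) -> (batch, seq_len, all_head_size).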
context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class CanineSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: Tuple[torch.FloatTensor], input_tensor: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor]: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class CanineAttention(nn.Module): def __init__(self, config, local=False, always_attend_to_first_position: bool=False, first_position_attends_to_all: bool=False, attend_from_chunk_width: int=128, attend_from_chunk_stride: int=128, attend_to_chunk_width: int=128, attend_to_chunk_stride: int=128): super().__init__() self.self = CanineSelfAttention(config) self.output = CanineSelfOutput(config) self.pruned_heads = set() self.local = local if attend_from_chunk_width < attend_from_chunk_stride: raise ValueError('`attend_from_chunk_width` < `attend_from_chunk_stride` would cause sequence positions to get skipped.') if attend_to_chunk_width < attend_to_chunk_stride: raise ValueError('`attend_to_chunk_width` < `attend_to_chunk_stride`would cause sequence positions to get skipped.') self.always_attend_to_first_position = always_attend_to_first_position self.first_position_attends_to_all = first_position_attends_to_all self.attend_from_chunk_width = attend_from_chunk_width self.attend_from_chunk_stride = attend_from_chunk_stride self.attend_to_chunk_width = attend_to_chunk_width self.attend_to_chunk_stride = attend_to_chunk_stride def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: Tuple[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: if not self.local: self_outputs = self.self(hidden_states, hidden_states, attention_mask, head_mask, output_attentions) attention_output = self_outputs[0] else: from_seq_length = to_seq_length = hidden_states.shape[1] from_tensor = to_tensor = hidden_states from_chunks = [] if self.first_position_attends_to_all: from_chunks.append((0, 1)) from_start = 1 else: from_start = 0 for chunk_start in range(from_start, from_seq_length, self.attend_from_chunk_stride): chunk_end = min(from_seq_length, chunk_start + self.attend_from_chunk_width) from_chunks.append((chunk_start, chunk_end)) to_chunks = [] if self.first_position_attends_to_all: to_chunks.append((0, to_seq_length)) for chunk_start in range(0, to_seq_length, self.attend_to_chunk_stride): 
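                # Chunk the to-sequence with the configured width and stride; each to-chunk is paired one-to-one with the from-chunk at the same index.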
chunk_end = min(to_seq_length, chunk_start + self.attend_to_chunk_width) to_chunks.append((chunk_start, chunk_end)) if len(from_chunks) != len(to_chunks): raise ValueError(f'Expected to have same number of `from_chunks` ({from_chunks}) and `to_chunks` ({from_chunks}). Check strides.') attention_output_chunks = [] attention_probs_chunks = [] for ((from_start, from_end), (to_start, to_end)) in zip(from_chunks, to_chunks): from_tensor_chunk = from_tensor[:, from_start:from_end, :] to_tensor_chunk = to_tensor[:, to_start:to_end, :] attention_mask_chunk = attention_mask[:, from_start:from_end, to_start:to_end] if self.always_attend_to_first_position: cls_attention_mask = attention_mask[:, from_start:from_end, 0:1] attention_mask_chunk = torch.cat([cls_attention_mask, attention_mask_chunk], dim=2) cls_position = to_tensor[:, 0:1, :] to_tensor_chunk = torch.cat([cls_position, to_tensor_chunk], dim=1) attention_outputs_chunk = self.self(from_tensor_chunk, to_tensor_chunk, attention_mask_chunk, head_mask, output_attentions) attention_output_chunks.append(attention_outputs_chunk[0]) if output_attentions: attention_probs_chunks.append(attention_outputs_chunk[1]) attention_output = torch.cat(attention_output_chunks, dim=1) attention_output = self.output(attention_output, hidden_states) outputs = (attention_output,) if not self.local: outputs = outputs + self_outputs[1:] else: outputs = outputs + tuple(attention_probs_chunks) return outputs class CanineIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class CanineOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: Tuple[torch.FloatTensor], input_tensor: torch.FloatTensor) -> torch.FloatTensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class CanineLayer(nn.Module): def __init__(self, config, local, always_attend_to_first_position, first_position_attends_to_all, attend_from_chunk_width, attend_from_chunk_stride, attend_to_chunk_width, attend_to_chunk_stride): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = CanineAttention(config, local, always_attend_to_first_position, first_position_attends_to_all, attend_from_chunk_width, attend_from_chunk_stride, attend_to_chunk_width, attend_to_chunk_stride) self.intermediate = CanineIntermediate(config) self.output = CanineOutput(config) def forward(self, hidden_states: Tuple[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = 
self_attention_outputs[1:] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class CanineEncoder(nn.Module): def __init__(self, config, local=False, always_attend_to_first_position=False, first_position_attends_to_all=False, attend_from_chunk_width=128, attend_from_chunk_stride=128, attend_to_chunk_width=128, attend_to_chunk_stride=128): super().__init__() self.config = config self.layer = nn.ModuleList([CanineLayer(config, local, always_attend_to_first_position, first_position_attends_to_all, attend_from_chunk_width, attend_from_chunk_stride, attend_to_chunk_width, attend_to_chunk_stride) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: Tuple[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class CaninePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class CaninePredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class CanineLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = CaninePredictionHeadTransform(config) self.decoder = 
nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, hidden_states: Tuple[torch.FloatTensor]) -> torch.FloatTensor: hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class CanineOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = CanineLMPredictionHead(config) def forward(self, sequence_output: Tuple[torch.Tensor]) -> Tuple[torch.Tensor]: prediction_scores = self.predictions(sequence_output) return prediction_scores class CaninePreTrainedModel(PreTrainedModel): config_class = CanineConfig load_tf_weights = load_tf_weights_in_canine base_model_prefix = 'canine' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) CANINE_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`CanineConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' CANINE_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare CANINE Model transformer outputting raw hidden-states without any specific head on top.', CANINE_START_DOCSTRING) class CanineModel(CaninePreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config shallow_config = copy.deepcopy(config) shallow_config.num_hidden_layers = 1 self.char_embeddings = CanineEmbeddings(config) self.initial_char_encoder = CanineEncoder(shallow_config, local=True, always_attend_to_first_position=False, first_position_attends_to_all=False, attend_from_chunk_width=config.local_transformer_stride, attend_from_chunk_stride=config.local_transformer_stride, attend_to_chunk_width=config.local_transformer_stride, attend_to_chunk_stride=config.local_transformer_stride) self.chars_to_molecules = CharactersToMolecules(config) self.encoder = CanineEncoder(config) self.projection = ConvProjection(config) self.final_char_encoder = CanineEncoder(shallow_config) self.pooler = CaninePooler(config) if add_pooling_layer else None self.post_init() def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def _create_3d_attention_mask_from_input_mask(self, from_tensor, to_mask): (batch_size, from_seq_length) = (from_tensor.shape[0], from_tensor.shape[1]) to_seq_length = to_mask.shape[1] to_mask = torch.reshape(to_mask, (batch_size, 1, to_seq_length)).float() broadcast_ones = torch.ones(size=(batch_size, from_seq_length, 1), dtype=torch.float32, device=to_mask.device) mask = broadcast_ones * to_mask return mask def _downsample_attention_mask(self, char_attention_mask: torch.Tensor, downsampling_rate: int): (batch_size, char_seq_len) = char_attention_mask.shape poolable_char_mask = torch.reshape(char_attention_mask, (batch_size, 1, char_seq_len)) pooled_molecule_mask = torch.nn.MaxPool1d(kernel_size=downsampling_rate, stride=downsampling_rate)(poolable_char_mask.float()) molecule_attention_mask = torch.squeeze(pooled_molecule_mask, dim=-1) return molecule_attention_mask def _repeat_molecules(self, molecules: torch.Tensor, char_seq_length: torch.Tensor) -> torch.Tensor: rate = self.config.downsampling_rate molecules_without_extra_cls = molecules[:, 1:, :] repeated = torch.repeat_interleave(molecules_without_extra_cls, repeats=rate, dim=-2) last_molecule = molecules[:, -1:, :] remainder_length = torch.fmod(torch.tensor(char_seq_length), torch.tensor(rate)).item() remainder_repeated = torch.repeat_interleave(last_molecule, repeats=remainder_length + rate, dim=-2) return torch.cat([repeated, remainder_repeated], 
dim=-2) @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CanineModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CanineModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) molecule_attention_mask = self._downsample_attention_mask(attention_mask, downsampling_rate=self.config.downsampling_rate) extended_molecule_attention_mask: torch.Tensor = self.get_extended_attention_mask(molecule_attention_mask, (batch_size, molecule_attention_mask.shape[-1])) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) input_char_embeddings = self.char_embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) char_attention_mask = self._create_3d_attention_mask_from_input_mask(input_ids if input_ids is not None else inputs_embeds, attention_mask) init_chars_encoder_outputs = self.initial_char_encoder(input_char_embeddings, attention_mask=char_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states) input_char_encoding = init_chars_encoder_outputs.last_hidden_state init_molecule_encoding = self.chars_to_molecules(input_char_encoding) encoder_outputs = self.encoder(init_molecule_encoding, attention_mask=extended_molecule_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) molecule_sequence_output = encoder_outputs[0] pooled_output = self.pooler(molecule_sequence_output) if self.pooler is not None else None repeated_molecules = self._repeat_molecules(molecule_sequence_output, char_seq_length=input_shape[-1]) concat = torch.cat([input_char_encoding, repeated_molecules], dim=-1) sequence_output = self.projection(concat) 
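        # Run the final shallow character-level transformer over the upsampled sequence (the projected concatenation of the initial character encoding and the repeated molecule representations).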
final_chars_encoder_outputs = self.final_char_encoder(sequence_output, attention_mask=extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states) sequence_output = final_chars_encoder_outputs.last_hidden_state if output_hidden_states: deep_encoder_hidden_states = encoder_outputs.hidden_states if return_dict else encoder_outputs[1] all_hidden_states = all_hidden_states + init_chars_encoder_outputs.hidden_states + deep_encoder_hidden_states + final_chars_encoder_outputs.hidden_states if output_attentions: deep_encoder_self_attentions = encoder_outputs.attentions if return_dict else encoder_outputs[-1] all_self_attentions = all_self_attentions + init_chars_encoder_outputs.attentions + deep_encoder_self_attentions + final_chars_encoder_outputs.attentions if not return_dict: output = (sequence_output, pooled_output) output += tuple((v for v in [all_hidden_states, all_self_attentions] if v is not None)) return output return CanineModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=all_hidden_states, attentions=all_self_attentions) @add_start_docstrings('\n CANINE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', CANINE_START_DOCSTRING) class CanineForSequenceClassification(CaninePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.canine = CanineModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.canine(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) 
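            # Multi-label classification treats each label as an independent binary decision, hence the BCEWithLogitsLoss in the branch that follows.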
elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n CANINE Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n ', CANINE_START_DOCSTRING) class CanineForMultipleChoice(CaninePreTrainedModel): def __init__(self, config): super().__init__(config) self.canine = CanineModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MultipleChoiceModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.canine(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n CANINE Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', CANINE_START_DOCSTRING) class CanineForTokenClassification(CaninePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.canine = CanineModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.canine(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n CANINE Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', CANINE_START_DOCSTRING) class CanineForQuestionAnswering(CaninePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.canine = CanineModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='Splend1dchan/canine-c-squad', output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, expected_output="'nice puppet'", expected_loss=8.81) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.canine(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, 
inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/canine/tokenization_canine.py """""" from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) UNICODE_VOCAB_SIZE = 1114112 PAD = 0 CLS = 57344 SEP = 57345 BOS = 57346 MASK = 57347 RESERVED = 57348 SPECIAL_CODEPOINTS: Dict[int, str] = {CLS: '[CLS]', SEP: '[SEP]', BOS: '[BOS]', MASK: '[MASK]', PAD: '[PAD]', RESERVED: '[RESERVED]'} SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for (codepoint, name) in SPECIAL_CODEPOINTS.items()} class CanineTokenizer(PreTrainedTokenizer): def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self._special_codepoints: Dict[str, int] = {} for (codepoint, name) in SPECIAL_CODEPOINTS.items(): self._special_codepoints[name] = codepoint self._special_codepoint_strings: Dict[int, str] = {codepoint: name for (name, codepoint) in self._special_codepoints.items()} self._unicode_vocab_size = UNICODE_VOCAB_SIZE self._num_special_tokens = len(self._special_codepoints) super().__init__(bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs) @property def vocab_size(self) -> int: return self._unicode_vocab_size def get_vocab(self): vocab = {chr(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return list(text) def 
_convert_token_to_id(self, token: str) -> int: try: return ord(token) except TypeError: raise ValueError(f"invalid token: '{token}'") def _convert_id_to_token(self, index: int) -> str: try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(index) except TypeError: raise ValueError(f'invalid id: {index}') def convert_tokens_to_string(self, tokens): return ''.join(tokens) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] result = cls + token_ids_0 + sep if token_ids_1 is not None: result += token_ids_1 + sep return result def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) result = [1] + [0] * len(token_ids_0) + [1] if token_ids_1 is not None: result += [0] * len(token_ids_1) + [1] return result def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] result = len(cls + token_ids_0 + sep) * [0] if token_ids_1 is not None: result += len(token_ids_1 + sep) * [1] return result def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None): return () # File: transformers-main/src/transformers/models/chameleon/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available _import_structure = {'configuration_chameleon': ['ChameleonConfig', 'ChameleonVQVAEConfig'], 'processing_chameleon': ['ChameleonProcessor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_chameleon'] = ['ChameleonForConditionalGeneration', 'ChameleonModel', 'ChameleonPreTrainedModel', 'ChameleonVQVAE'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_chameleon'] = ['ChameleonImageProcessor'] if TYPE_CHECKING: from .configuration_chameleon import ChameleonConfig, ChameleonVQVAEConfig from .processing_chameleon import ChameleonProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chameleon import ChameleonForConditionalGeneration, ChameleonModel, ChameleonPreTrainedModel, ChameleonVQVAE try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_chameleon import ChameleonImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/chameleon/configuration_chameleon.py """""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class ChameleonVQVAEConfig(PretrainedConfig): model_type = 'chameleon_vqgan' def __init__(self, embed_dim: int=256, num_embeddings: int=8192, double_latent: bool=False, latent_channels: int=256, resolution: int=512, in_channels: int=3, 
base_channels: int=128, channel_multiplier: List[int]=[1, 1, 2, 2, 4], num_res_blocks: int=2, attn_resolutions: List[int]=None, dropout: float=0.0, attn_type: str='vanilla', initializer_range=0.02, **kwargs): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_embeddings = num_embeddings self.double_latent = double_latent self.latent_channels = latent_channels self.resolution = resolution self.in_channels = in_channels self.base_channels = base_channels self.channel_multiplier = channel_multiplier self.num_res_blocks = num_res_blocks self.attn_resolutions = attn_resolutions self.dropout = dropout self.attn_type = attn_type self.initializer_range = initializer_range class ChameleonConfig(PretrainedConfig): model_type = 'chameleon' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=65536, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=32, hidden_act='silu', max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, model_parallel_size=1, swin_norm=False, vq_config=None, vocabulary_map=None, mlp_bias=False, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.mlp_bias = mlp_bias self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self._rope_scaling_validation() self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.model_parallel_size = model_parallel_size self.swin_norm = swin_norm if vq_config is None: vq_config = {} logger.info('vq_config is None. 
initializing the ChameleonVQConfig with default values.') self.vq_config = ChameleonVQVAEConfig(**vq_config) self.vocabulary_map = vocabulary_map super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) def _rope_scaling_validation(self): if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: raise ValueError(f'`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, got {self.rope_scaling}') rope_scaling_type = self.rope_scaling.get('type', None) rope_scaling_factor = self.rope_scaling.get('factor', None) if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']: raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}") if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") # File: transformers-main/src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py import argparse import gc import json import os import requests import torch import yaml from accelerate import init_empty_weights from PIL import Image from transformers import ChameleonConfig, ChameleonForCausalLM, ChameleonImageProcessor, ChameleonProcessor try: from transformers import LlamaTokenizerFast except ImportError: raise ValueError("Chameleon conversion supports only FastTokenizer and LlamaTokenizerFast can't be imported! Update your `tokenizers` library and re-run the tokenizer conversion.") '' NUM_SHARDS = {'7B': 1, '30B': 4} VOCAB_SIZE = 65536 def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256): return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of) def read_json(path): with open(path, 'r') as f: return json.load(f) def write_json(text, path): with open(path, 'w') as f: json.dump(text, f) def write_model(model_path, input_base_path, model_size, chameleon_version=1): os.makedirs(model_path, exist_ok=True) input_model_path = os.path.join(input_base_path, 'models', model_size.lower()) params_path = os.path.join(input_model_path, 'params.json') consolidate_params_path = os.path.join(input_model_path, 'consolidate_params.json') params = read_json(params_path) if os.path.isfile(consolidate_params_path): params = {**params, **read_json(consolidate_params_path)} num_shards = NUM_SHARDS[model_size] model_parallel_size = params['model_parallel_size'] params = params.get('model', params) n_layers = params['n_layers'] n_heads = params['n_heads'] n_heads_per_shard = n_heads // num_shards dim = params['dim'] dims_per_head = dim // n_heads base = params.get('rope_theta', 10000.0) swin_norm = params['swin_norm'] if base > 10000.0: max_position_embeddings = 16384 elif chameleon_version == 1: max_position_embeddings = 4096 else: raise NotImplementedError(f'Version {chameleon_version} of chameleon is not supported yet. 
Current supported versions of chameleon are [1].') if params.get('n_kv_heads', None) is not None: num_key_value_heads = params['n_kv_heads'] num_local_key_value_heads = n_heads_per_shard // num_key_value_heads key_value_dim = dim // num_key_value_heads else: num_key_value_heads = n_heads num_local_key_value_heads = n_heads_per_shard key_value_dim = dim print(f'Fetching all parameters from the checkpoint at {input_model_path}.') if num_shards == 1: loaded = None for possible_name in ['consolidated.pth', 'consolidated.00.pth']: possible_path = os.path.join(input_model_path, possible_name) if os.path.exists(possible_path): loaded = torch.load(possible_path, map_location='cpu') break assert loaded is not None else: loaded = [torch.load(os.path.join(input_model_path, f'consolidated.{i:02d}.pth'), map_location='cpu') for i in range(num_shards)] def permute(w, n_heads, dim1=dim, dim2=dim): return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2) state_dict = {} for layer_i in range(n_layers): if num_shards == 1: state_dict.update({f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(loaded[f'layers.{layer_i}.attention.wq.weight'], n_heads=n_heads), f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(loaded[f'layers.{layer_i}.attention.wk.weight'], n_heads=num_key_value_heads, dim1=key_value_dim), f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'], f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'], f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'], f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'], f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'], f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'], f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight']}) state_dict[f'model.layers.{layer_i}.self_attn.q_norm.weight'] = loaded[f'layers.{layer_i}.attention.q_normalization.weight'].view(dims_per_head // 2, 2).t().reshape(1, -1).repeat_interleave(n_heads, 0) state_dict[f'model.layers.{layer_i}.self_attn.q_norm.bias'] = loaded[f'layers.{layer_i}.attention.q_normalization.bias'].view(dims_per_head // 2, 2).t().reshape(1, -1).repeat_interleave(n_heads, 0) state_dict[f'model.layers.{layer_i}.self_attn.k_norm.weight'] = loaded[f'layers.{layer_i}.attention.k_normalization.weight'].view(dims_per_head // 2, 2).t().reshape(1, -1).repeat_interleave(num_key_value_heads, 0) state_dict[f'model.layers.{layer_i}.self_attn.k_norm.bias'] = loaded[f'layers.{layer_i}.attention.k_normalization.bias'].view(dims_per_head // 2, 2).t().reshape(1, -1).repeat_interleave(num_key_value_heads, 0) else: state_dict.update({f'model.layers.{layer_i}.input_layernorm.weight': torch.stack([l[f'layers.{layer_i}.attention_norm.weight'] for l in loaded]).mean(dim=0), f'model.layers.{layer_i}.post_attention_layernorm.weight': torch.stack([l[f'layers.{layer_i}.ffn_norm.weight'] for l in loaded]).mean(dim=0)}) state_dict[f'model.layers.{layer_i}.self_attn.q_proj.weight'] = permute(torch.cat([loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(n_heads_per_shard, dims_per_head, dim) for i in range(num_shards)], dim=0).reshape(dim, dim), n_heads=n_heads) state_dict[f'model.layers.{layer_i}.self_attn.k_proj.weight'] = 
permute(torch.cat([loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(num_local_key_value_heads, dims_per_head, dim) for i in range(num_shards)], dim=0).reshape(key_value_dim, dim), n_heads=num_key_value_heads, dim1=key_value_dim) state_dict[f'model.layers.{layer_i}.self_attn.q_norm.weight'] = torch.cat([l[f'layers.{layer_i}.attention.q_normalization.weight'].unsqueeze(0) for l in loaded]).view(num_shards, dims_per_head // 2, 2).transpose(1, 2).reshape(num_shards, -1).repeat_interleave(n_heads // num_shards, 0) state_dict[f'model.layers.{layer_i}.self_attn.q_norm.bias'] = torch.cat([l[f'layers.{layer_i}.attention.q_normalization.bias'].unsqueeze(0) for l in loaded]).view(num_shards, dims_per_head // 2, 2).transpose(1, 2).reshape(num_shards, -1).repeat_interleave(n_heads // num_shards, 0) state_dict[f'model.layers.{layer_i}.self_attn.k_norm.weight'] = torch.cat([l[f'layers.{layer_i}.attention.k_normalization.weight'].unsqueeze(0) for l in loaded]).view(num_shards, dims_per_head // 2, 2).transpose(1, 2).reshape(num_shards, -1).repeat_interleave(num_key_value_heads // num_shards, 0) state_dict[f'model.layers.{layer_i}.self_attn.k_norm.bias'] = torch.cat([l[f'layers.{layer_i}.attention.k_normalization.bias'].unsqueeze(0) for l in loaded]).view(num_shards, dims_per_head // 2, 2).transpose(1, 2).reshape(num_shards, -1).repeat_interleave(num_key_value_heads // num_shards, 0) state_dict[f'model.layers.{layer_i}.self_attn.v_proj.weight'] = torch.cat([loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(num_local_key_value_heads, dims_per_head, dim) for i in range(num_shards)], dim=0).reshape(key_value_dim, dim) state_dict[f'model.layers.{layer_i}.self_attn.o_proj.weight'] = torch.cat([loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(num_shards)], dim=1) state_dict[f'model.layers.{layer_i}.mlp.gate_proj.weight'] = torch.cat([loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(num_shards)], dim=0) state_dict[f'model.layers.{layer_i}.mlp.down_proj.weight'] = torch.cat([loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(num_shards)], dim=1) state_dict[f'model.layers.{layer_i}.mlp.up_proj.weight'] = torch.cat([loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(num_shards)], dim=0) if num_shards == 1: state_dict.update({'model.embed_tokens.weight': loaded['tok_embeddings.weight'], 'model.norm.weight': loaded['norm.weight'], 'lm_head.weight': loaded['output.weight']}) else: state_dict.update({'model.embed_tokens.weight': torch.cat([loaded[i]['tok_embeddings.weight'] for i in range(num_shards)], dim=1), 'model.norm.weight': torch.stack([loaded[i]['norm.weight'] for i in range(num_shards)]).mean(dim=0), 'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(num_shards)], dim=0)}) vqgan_path = os.path.join(input_base_path, 'tokenizer/vqgan.ckpt') vqgan_state_dict = torch.load(vqgan_path, map_location='cpu')['state_dict'] for (k, v) in vqgan_state_dict.items(): if 'decoder' in k: continue state_dict[f'model.vqmodel.{k}'] = v ffn_dim_multiplier = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1 multiple_of = params['multiple_of'] if 'multiple_of' in params else 256 with open(os.path.join(input_base_path, 'tokenizer/text_tokenizer.json')) as tokenizer_file: tokenizer_config = json.load(tokenizer_file) vocabulary_map = tokenizer_config['model']['vocab'] vocabulary_map[''] = vocabulary_map[''] del vocabulary_map[''] for token in tokenizer_config['added_tokens']: if token['content'] == '': 
token['content'] = '' with open(os.path.join(input_base_path, 'tokenizer/text_tokenizer_modified.json'), 'w') as f: json.dump(tokenizer_config, f) vq_keys_to_replace = [('ch', 'base_channels'), ('out_ch', 'out_channels'), ('n_embed', 'num_embeddings'), ('ch_mult', 'channel_multiplier'), ('double_z', 'double_latent'), ('z_channels', 'latent_channels')] with open(os.path.join(input_base_path, 'tokenizer/vqgan.yaml')) as vqgan_cfg_file: vq_config = yaml.safe_load(vqgan_cfg_file)['model']['params'] vq_config.update(**vq_config['ddconfig']) for (old, new) in vq_keys_to_replace: vq_config[new] = vq_config[old] del vq_config['ddconfig'] del vq_config['ckpt_path'] del vq_config['lossconfig'] config = ChameleonConfig(hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params['n_heads'], num_hidden_layers=params['n_layers'], rms_norm_eps=params['norm_eps'], num_key_value_heads=num_key_value_heads, vocab_size=VOCAB_SIZE, rope_theta=base, max_position_embeddings=max_position_embeddings, model_parallel_size=model_parallel_size, swin_norm=swin_norm, vq_config=vq_config, vocabulary_map=vocabulary_map) with init_empty_weights(): model = ChameleonForCausalLM(config) model.load_state_dict(state_dict, assign=True, strict=False) model.save_pretrained(model_path, safe_serialization=True) tokenizer = LlamaTokenizerFast(tokenizer_file=os.path.join(input_base_path, 'tokenizer/text_tokenizer_modified.json'), legacy=False) tokenizer.sep_token_id = 8710 tokenizer.pad_token_id = 1 image_processor = ChameleonImageProcessor() processor = ChameleonProcessor(image_processor=image_processor, tokenizer=tokenizer) processor.save_pretrained(model_path) del state_dict del loaded del vqgan_state_dict gc.collect() print('Loading the checkpoint in a Chameleon model...') print('*' * 100) model = ChameleonForCausalLM.from_pretrained(model_path, attn_implementation='eager', torch_dtype=torch.bfloat16, device_map='auto') processor = ChameleonProcessor.from_pretrained(model_path) prompt = "I'm very intrigued by this work of art:Please tell me about the artist." image = Image.open(requests.get('https://uploads4.wikiart.org/images/paul-klee/death-for-the-idea-1915.jpg!Large.jpg', stream=True).raw) inputs = processor(prompt, images=image, return_tensors='pt').to(model.device, torch.bfloat16) length = inputs.input_ids.shape[1] out = model.generate(**inputs, max_new_tokens=40, do_sample=False) generated_text = processor.batch_decode(out[:, length:], skip_special_tokens=True)[0] print(f'Generation for single-image: {generated_text}') print('*' * 100) prompt = 'I used to know a lot about constellations when I was younger, but as I grew older, I forgot most of what I knew. These are the only two constellations that I really remember now.I would like for you to tell me about 3 more constellations and give me a little bit of history about the constellation.' 
image = Image.open(requests.get('https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg', stream=True).raw) image_2 = Image.open(requests.get('https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg', stream=True).raw) inputs = processor(prompt, images=[image, image_2], return_tensors='pt').to(model.device, dtype=torch.bfloat16) length = inputs.input_ids.shape[1] out = model.generate(**inputs, max_new_tokens=50, do_sample=False) generated_text = processor.batch_decode(out[:, length:], skip_special_tokens=True)[0] print(f'Generation for multi-image: {generated_text}') def main(): parser = argparse.ArgumentParser() parser.add_argument('--input_dir', help='Location of Chameleon weights') parser.add_argument('--model_size', choices=['7B', '30B'], help=' models correspond to the finetuned versions, and are specific to the Chameleon official release. For more details on Chameleon, checkout the original repo: https://github.com/facebookresearch/chameleon') parser.add_argument('--output_dir', help='Location to write HF model') parser.add_argument('--test_inference', action='store_true', help="Whether to load the model for generation to test it's converted correctly.") parser.add_argument('--chameleon_version', choices=[1], default=1, type=int, help='Version of the Chameleon model to convert') args = parser.parse_args() write_model(model_path=args.output_dir, input_base_path=args.input_dir, model_size=args.model_size, chameleon_version=args.chameleon_version) if __name__ == '__main__': main() # File: transformers-main/src/transformers/models/chameleon/image_processing_chameleon.py """""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import get_resize_output_image_size, resize, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, is_valid_image, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): import PIL def make_batched_images(images) -> List[List[ImageInput]]: if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]): return [img for img_list in images for img in img_list] elif isinstance(images, (list, tuple)) and is_valid_image(images[0]): return images elif is_valid_image(images): return [images] raise ValueError(f'Could not make batched video from {images}') class ChameleonImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PIL.Image.LANCZOS, do_center_crop: bool=True, crop_size: Dict[str, int]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=0.0078, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 512} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {'height': 512, 'width': 512} crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size') self.do_resize = do_resize self.size = size self.resample = 
resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [1.0, 1.0, 1.0] self.image_std = image_std if image_std is not None else [1.0, 1.0, 1.0] self.do_convert_rgb = do_convert_rgb def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: default_to_square = True if 'shortest_edge' in size: size = size['shortest_edge'] default_to_square = False elif 'height' in size and 'width' in size: size = (size['height'], size['width']) else: raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: int=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, param_name='size', default_to_square=False) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_batched_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images = [self.blend_rgba(image) for image in images] images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) all_images = [] for image in images: if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) all_images.append(image) images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors) def blend_rgba(self, image: ImageInput) -> ImageInput: if not isinstance(image, PIL.Image.Image): return image elif image.mode == 'RGB': return image img_rgba = np.array(image.convert('RGBA')) if not (img_rgba[:, :, 3] < 255).any(): return image.convert('RGB') alpha = img_rgba[:, :, 3] / 255.0 img_rgb = (1 - alpha[:, :, np.newaxis]) * 255 + alpha[:, :, np.newaxis] * img_rgba[:, :, :3] return PIL.Image.fromarray(img_rgb.astype('uint8'), 'RGB') # File: transformers-main/src/transformers/models/chameleon/modeling_chameleon.py """""" import math from functools import cached_property from typing import Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, StaticCache from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import _flash_attention_forward from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_utils import PreTrainedModel from ...pytorch_utils import ALL_LAYERNORM_LAYERS from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings from .configuration_chameleon import ChameleonConfig, ChameleonVQVAEConfig if is_flash_attn_2_available(): from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask logger = logging.get_logger(__name__) 
_CONFIG_FOR_DOC = 'ChameleonConfig' _CHECKPOINT_FOR_DOC = 'meta/chameleon-7b' _EXPECTED_OUTPUT_SHAPE = [1, 7, 4096] _SEQ_CLASS_EXPECTED_LOSS = 1.03 _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'" class ChameleonRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}' ALL_LAYERNORM_LAYERS.append(ChameleonRMSNorm) class ChameleonRotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): super().__init__() self.scaling_factor = scaling_factor self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim) self.register_buffer('inv_freq', inv_freq, persistent=False) self.max_seq_len_cached = max_position_embeddings @torch.no_grad() def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)) class ChameleonLinearScalingRotaryEmbedding(ChameleonRotaryEmbedding): def forward(self, x, position_ids): position_ids = position_ids.float() / self.scaling_factor (cos, sin) = super().forward(x, position_ids) return (cos, sin) class ChameleonDynamicNTKScalingRotaryEmbedding(ChameleonRotaryEmbedding): def forward(self, x, position_ids): seq_len = torch.max(position_ids) + 1 if seq_len > self.max_position_embeddings: base = self.base * (self.scaling_factor * seq_len / self.max_position_embeddings - (self.scaling_factor - 1)) ** (self.dim / (self.dim - 2)) inv_freq = 1.0 / base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device) / self.dim) self.register_buffer('inv_freq', inv_freq, persistent=False) (cos, sin) = super().forward(x, position_ids) return (cos, sin) def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = q * cos + rotate_half(q) * sin k_embed = k * cos + rotate_half(k) * sin return (q_embed, k_embed) class ChameleonMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias) self.act_fn = 
ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj class ChameleonLayerNorm(nn.LayerNorm): def __init__(self, hidden_size, *args, **kwargs): super().__init__(hidden_size, *args, **kwargs) self.normalized_shape = (hidden_size[-1],) def forward(self, hidden_states): hidden_states = F.layer_norm(hidden_states, self.normalized_shape, None, None, eps=1e-05) hidden_states = hidden_states * self.weight + self.bias return hidden_states def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: (batch, num_key_value_heads, slen, head_dim) = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class ChameleonAttention(nn.Module): def __init__(self, config: ChameleonConfig, layer_idx: Optional[int]=None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self.model_parallel_size = config.model_parallel_size if self.head_dim * self.num_heads != self.hidden_size: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias) self.q_norm = ChameleonLayerNorm((self.num_heads, self.head_dim)) self.k_norm = ChameleonLayerNorm((self.num_key_value_heads, self.head_dim)) self._init_rope() def _init_rope(self): if self.config.rope_scaling is None: self.rotary_emb = ChameleonRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta) else: scaling_type = self.config.rope_scaling['type'] scaling_factor = self.config.rope_scaling['factor'] if scaling_type == 'linear': self.rotary_emb = ChameleonLinearScalingRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta) elif scaling_type == 'dynamic': self.rotary_emb = ChameleonDynamicNTKScalingRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta) else: raise ValueError(f'Unknown RoPE scaling type {scaling_type}') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, 
output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.reshape(-1, self.num_heads, self.head_dim) query_states = self.q_norm(query_states) key_states = key_states.reshape(-1, self.num_key_value_heads, self.head_dim) key_states = self.k_norm(key_states) query_states = query_states.reshape(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.reshape(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class ChameleonFlashAttention2(ChameleonAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if isinstance(past_key_value, StaticCache): raise ValueError('`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers') output_attentions = False (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.reshape(-1, self.num_heads, self.head_dim) query_states = self.q_norm(query_states) key_states = key_states.reshape(-1, self.num_key_value_heads, self.head_dim) key_states = self.k_norm(key_states) query_states = 
query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, sliding_window=getattr(self, 'sliding_window', None), use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class ChameleonSdpaAttention(ChameleonAttention): def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: logger.warning_once('ChameleonModel is using ChameleonSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.reshape(-1, self.num_heads, self.head_dim) query_states = self.q_norm(query_states) key_states = key_states.reshape(-1, self.num_key_value_heads, self.head_dim) key_states = self.k_norm(key_states) query_states = query_states.reshape(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.reshape(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin, None) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None and cache_position is not None: causal_mask = causal_mask[:, :, :, :key_states.shape[-2]] if query_states.device.type == 'cuda' and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() is_causal = True if causal_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) return (attn_output, None, past_key_value) CHAMELEON_ATTENTION_CLASSES = {'eager': ChameleonAttention, 'flash_attention_2': ChameleonFlashAttention2, 'sdpa': ChameleonSdpaAttention} class ChameleonDecoderLayer(nn.Module): def __init__(self, config: ChameleonConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = CHAMELEON_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) self.mlp = ChameleonMLP(config) self.input_layernorm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, 
position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class ChameleonSwinDecoderLayer(nn.Module): def __init__(self, config: ChameleonConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = CHAMELEON_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) self.mlp = ChameleonMLP(config) self.input_layernorm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states = self.input_layernorm(hidden_states) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.mlp(hidden_states) hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class ChameleonVQVAEVectorQuantizer(nn.Module): def __init__(self, config): super().__init__() self.num_embeddings = config.num_embeddings self.embedding_dim = config.embed_dim self.beta = getattr(config, 'beta', 0.25) self.embedding = nn.Embedding(self.num_embeddings, self.embedding_dim) self.re_embed = self.num_embeddings def forward(self, hidden_state: torch.Tensor): hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous() hidden_state_flattened = hidden_state.view(-1, self.embedding_dim) distances = torch.sum(hidden_state_flattened ** 2, dim=1, keepdim=True) + torch.sum(self.embedding.weight ** 2, dim=1) - 2 * torch.einsum('bd,dn->bn', hidden_state_flattened, self.embedding.weight.transpose(0, 1)) min_encoding_indices = torch.argmin(distances, dim=1) hidden_state_quant = self.embedding(min_encoding_indices).view(hidden_state.shape) loss = torch.mean((hidden_state_quant.detach() - hidden_state) ** 2) + self.beta * torch.mean((hidden_state_quant - hidden_state.detach()) ** 2) hidden_state_quant = hidden_state + (hidden_state_quant - hidden_state).detach() hidden_state_quant = hidden_state_quant.permute(0, 3, 1, 2).contiguous() return (hidden_state_quant, loss, min_encoding_indices) class ChameleonVQVAEEncoderConvDownsample(nn.Module): def __init__(self, in_channels): super().__init__() self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) def forward(self, hidden_states): hidden_states = 
F.pad(hidden_states, pad=(0, 1, 0, 1), mode='constant', value=0) hidden_states = self.conv(hidden_states) return hidden_states class ChameleonVQVAEEncoderResnetBlock(nn.Module): def __init__(self, config, in_channels, out_channels=None, conv_shortcut=False): super().__init__() self.in_channels = in_channels self.out_channels = in_channels if out_channels is None else out_channels self.use_conv_shortcut = conv_shortcut self.norm1 = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-06, affine=True) self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) self.norm2 = torch.nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-06, affine=True) self.dropout = torch.nn.Dropout(config.dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) if self.in_channels != self.out_channels: if self.use_conv_shortcut: self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) else: self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, hidden_states): residual = hidden_states hidden_states = self.norm1(hidden_states) hidden_states *= torch.sigmoid(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = self.norm2(hidden_states) hidden_states *= torch.sigmoid(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.in_channels != self.out_channels: if self.use_conv_shortcut: residual = self.conv_shortcut(residual) else: residual = self.nin_shortcut(residual) return residual + hidden_states class ChameleonVQVAEEncoderAttnBlock(nn.Module): def __init__(self, in_channels): super().__init__() self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-06, affine=True) self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) def forward(self, hidden_states): residual = hidden_states hidden_states = self.norm(hidden_states) query_states = self.q(hidden_states) key_states = self.k(hidden_states) value_states = self.v(hidden_states) (batch_size, channels, height, width) = query_states.shape query_states = query_states.reshape(batch_size, channels, height * width).permute(0, 2, 1) key_states = key_states.reshape(batch_size, channels, height * width) attn_weights = torch.bmm(query_states, key_states) attn_weights = attn_weights * int(channels) ** (-0.5) attn_weights = F.softmax(attn_weights, dim=2) value_states = value_states.reshape(batch_size, channels, height * width) attn_weights = attn_weights.permute(0, 2, 1) attn_output = torch.bmm(value_states, attn_weights).reshape(batch_size, channels, height, width) attn_output = self.proj_out(attn_output) return residual + attn_output class ChameleonVQVAEEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_resolutions = len(config.channel_multiplier) self.num_res_blocks = config.num_res_blocks base_channels = config.base_channels resolution = config.resolution in_channels = config.in_channels double_latent = config.double_latent latent_channels = config.latent_channels channel_multiplier = config.channel_multiplier self.conv_in = 
torch.nn.Conv2d(in_channels, base_channels, kernel_size=3, stride=1, padding=1) curr_res = resolution in_channel_multiplier = (1,) + tuple(channel_multiplier) self.in_channel_multiplier = in_channel_multiplier self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = base_channels * in_channel_multiplier[i_level] block_out = base_channels * channel_multiplier[i_level] for i_block in range(self.num_res_blocks): block.append(ChameleonVQVAEEncoderResnetBlock(config=config, in_channels=block_in, out_channels=block_out)) block_in = block_out if config.attn_resolutions is not None and curr_res in config.attn_resolutions and (config.attn_type == 'vanilla'): attn.append(ChameleonVQVAEEncoderAttnBlock(block_in)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions - 1: down.downsample = ChameleonVQVAEEncoderConvDownsample(block_in) curr_res = curr_res // 2 self.down.append(down) self.mid = nn.Module() self.mid.block_1 = ChameleonVQVAEEncoderResnetBlock(config=config, in_channels=block_in, out_channels=block_in) self.mid.attn_1 = ChameleonVQVAEEncoderAttnBlock(block_in) if config.attn_type == 'vanilla' else nn.Identity() self.mid.block_2 = ChameleonVQVAEEncoderResnetBlock(config=config, in_channels=block_in, out_channels=block_in) self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-06, affine=True) self.conv_out = torch.nn.Conv2d(block_in, 2 * latent_channels if double_latent else latent_channels, kernel_size=3, stride=1, padding=1) def forward(self, pixel_values: torch.LongTensor): hidden_states = [self.conv_in(pixel_values)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): hidden_state = self.down[i_level].block[i_block](hidden_states[-1]) if len(self.down[i_level].attn) > 0: hidden_state = self.down[i_level].attn[i_block](hidden_state) hidden_states.append(hidden_state) if i_level != self.num_resolutions - 1: hidden_states.append(self.down[i_level].downsample(hidden_states[-1])) last_hidden_state = hidden_states[-1] last_hidden_state = self.mid.block_1(last_hidden_state) last_hidden_state = self.mid.attn_1(last_hidden_state) last_hidden_state = self.mid.block_2(last_hidden_state) last_hidden_state = self.norm_out(last_hidden_state) last_hidden_state *= torch.sigmoid(last_hidden_state) last_hidden_state = self.conv_out(last_hidden_state) return last_hidden_state CHAMELEON_VQ_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`ChameleonVQVAEConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. 
Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('The VQ-VAE model used in Chameleon for encoding/decoding images into discrete tokens.\n This model follows the "Make-a-scene: Scene-based text-to-image generation with human priors" paper from\n [ Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv Taigman](https://arxiv.org/abs/2203.13131).\n ', CHAMELEON_VQ_START_DOCSTRING) class ChameleonVQVAE(PreTrainedModel): config_class = ChameleonVQVAEConfig _no_split_modules = ['ChameleonVQVAEVectorQuantizer'] def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) elif isinstance(module, nn.GroupNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() def __init__(self, config: ChameleonVQVAEConfig): super().__init__(config) self.encoder = ChameleonVQVAEEncoder(config) self.quantize = ChameleonVQVAEVectorQuantizer(config) self.quant_conv = torch.nn.Conv2d(config.latent_channels, config.embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(config.embed_dim, config.latent_channels, 1) self.eval() def encode(self, pixel_values: torch.LongTensor): hidden_states = self.encoder(pixel_values) hidden_states = self.quant_conv(hidden_states) (quant, emb_loss, indices) = self.quantize(hidden_states) return (quant, emb_loss, indices) class ChameleonImageVocabularyMapping: def __init__(self, vocab_map): self.vocab_map = vocab_map self.image_token_id = vocab_map.get('') @cached_property def val2name(self): return {v: k for (k, v) in self.vocab_map.items()} @cached_property def image_tokens(self): return sorted([val for (name, val) in self.vocab_map.items() if name.startswith('IMGIMG')]) @cached_property def bpe2img(self): img_tkn_chr_mapping = {chr(ord('A') + i): str(i) for i in range(10)} def remap(old_name: str) -> str: return ''.join((img_tkn_chr_mapping.get(c, c) for c in old_name[len('IMGIMG'):-1])) return {tok: int(remap(self.val2name[tok])) for tok in self.image_tokens} @cached_property def img2bpe(self): return {v: k for (k, v) in self.bpe2img.items()} @cached_property def bpe2img_search_tensors(self): return (torch.tensor(sorted(self.bpe2img.keys())), torch.tensor(sorted(self.bpe2img.values()))) @cached_property def img2bpe_mapping_tensor(self): mapping = torch.zeros(max(self.img2bpe.keys()) + 1, dtype=torch.int) for (k, v) in self.img2bpe.items(): mapping[k] = v return mapping def convert_img2bpe(self, img_batch: torch.Tensor) -> torch.Tensor: device = img_batch.device img_tokens = self.img2bpe_mapping_tensor[img_batch.to('cpu')] return img_tokens.to(device) CHAMELEON_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`ChameleonConfig`]):\n Model configuration class with all the parameters of the model. 
Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('The bare chameleon Model outputting raw hidden-states without any specific head on top.', CHAMELEON_START_DOCSTRING) class ChameleonPreTrainedModel(PreTrainedModel): config_class = ChameleonConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['ChameleonDecoderLayer', 'ChameleonSwinDecoderLayer'] _skip_keys_device_placement = ['past_key_values', 'causal_mask'] _supports_flash_attn_2 = True _supports_sdpa = True _supports_quantized_cache = True _supports_cache_class = True _supports_static_cache = True _supports_param_buffer_assignment = False def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, ChameleonVQVAE): module.apply(module._init_weights) elif isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() CHAMELEON_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):\n The tensors corresponding to the input images. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`ChameleonImageProcessor.__call__`] for details.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`Cache`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. 
This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Should always be a [`~cache_utils.Cache`] instance and the model will output the same cache instance.\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. It is used to update the cache in the correct position and to infer\n the complete sequence length.\n" @add_start_docstrings('The bare chameleon Model outputting raw hidden-states without any specific head on top.', CHAMELEON_START_DOCSTRING) class ChameleonModel(ChameleonPreTrainedModel): def __init__(self, config: ChameleonConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.vocabulary_mapping = ChameleonImageVocabularyMapping(config.vocabulary_map) decoder_layer = ChameleonDecoderLayer if not self.config.swin_norm else ChameleonSwinDecoderLayer self.layers = nn.ModuleList([decoder_layer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.norm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.vqmodel = ChameleonVQVAE(config.vq_config) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def get_image_tokens(self, pixel_values: torch.FloatTensor): batch_size = pixel_values.shape[0] (_, _, image_toks) = self.vqmodel.encode(pixel_values) bpe_toks = self.vocabulary_mapping.convert_img2bpe(image_toks) bpe_toks = bpe_toks.view(batch_size, -1) return bpe_toks @add_start_docstrings_to_model_forward(CHAMELEON_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, input_ids: torch.LongTensor=None, pixel_values: torch.FloatTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, 
past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.') use_cache = False if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if pixel_values is not None and inputs_embeds is not None: raise ValueError('You cannot specify both pixel_values and inputs_embeds at the same time, and must specify either one') if pixel_values is not None: image_tokens = self.get_image_tokens(pixel_values) special_image_mask = input_ids == self.vocabulary_mapping.image_token_id image_tokens = image_tokens.to(input_ids.device, input_ids.dtype) input_ids = input_ids.masked_scatter(special_image_mask, image_tokens) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) hidden_states = inputs_embeds all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position) else: layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns) def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool): if 
self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions): causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @add_start_docstrings('Chameleon Model with a head on top used for outputting logits for next token prediction.', CHAMELEON_START_DOCSTRING) class ChameleonForConditionalGeneration(ChameleonPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.model = ChameleonModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(CHAMELEON_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, pixel_values: torch.FloatTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] logits = self.lm_head(hidden_states) logits = logits.float() image_tokens = self.model.vocabulary_mapping.image_tokens logits[:, :, image_tokens] = torch.finfo(logits.dtype).min loss = None if labels is not None: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, pixel_values=None, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs): if past_key_values is not None: if inputs_embeds is not None: input_ids = input_ids[:, -cache_position.shape[0]:] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids.contiguous()} if cache_position[0] == 0: model_inputs['pixel_values'] = pixel_values model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask}) return model_inputs # File: transformers-main/src/transformers/models/chameleon/processing_chameleon.py """""" from typing import List, Optional, Union from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class ChameleonProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] tokenizer_class = ('LlamaTokenizer', 'LlamaTokenizerFast') image_processor_class = 'ChameleonImageProcessor' def __init__(self, image_processor, tokenizer, image_seq_length: int=1024, image_token: str=''): self.image_seq_length = image_seq_length self.image_token = image_token self.image_start_token = '' self.image_end_token = '' super().__init__(image_processor, tokenizer) def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, images: ImageInput=None, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: int=None, return_tensors: Optional[Union[str, TensorType]]=TensorType.PYTORCH, return_for_text_completion: bool=False) -> BatchFeature: if isinstance(text, str): text = [text] elif not isinstance(text, list) and (not isinstance(text[0], str)): raise TypeError('Invalid input text. 
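# A small sketch of the shifted next-token loss computed in ChameleonForConditionalGeneration.forward
# above: logits at position t are scored against the label at position t + 1. Sizes are toy values,
# unrelated to any real config.
import torch
from torch.nn import CrossEntropyLoss

batch, seq, vocab = 2, 5, 11
logits = torch.randn(batch, seq, vocab)
labels = torch.randint(0, vocab, (batch, seq))

shift_logits = logits[..., :-1, :].contiguous().view(-1, vocab)
shift_labels = labels[..., 1:].contiguous().view(-1)
loss = CrossEntropyLoss()(shift_logits, shift_labels)
print(loss.item())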
Please provide a string, or a list of strings') prompt_strings = [] one_img_tokens = self.image_start_token + self.image_token * self.image_seq_length + self.image_end_token for sample in text: sample = sample.replace(self.image_token, one_img_tokens) if not return_for_text_completion: sample += self.tokenizer.sep_token prompt_strings.append(sample) data = self.tokenizer(prompt_strings, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length) if images is not None: pixel_values = self.image_processor(images, return_tensors=return_tensors)['pixel_values'] data['pixel_values'] = pixel_values return BatchFeature(data=data, tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) # File: transformers-main/src/transformers/models/chinese_clip/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_chinese_clip': ['ChineseCLIPConfig', 'ChineseCLIPOnnxConfig', 'ChineseCLIPTextConfig', 'ChineseCLIPVisionConfig'], 'processing_chinese_clip': ['ChineseCLIPProcessor']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['feature_extraction_chinese_clip'] = ['ChineseCLIPFeatureExtractor'] _import_structure['image_processing_chinese_clip'] = ['ChineseCLIPImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_chinese_clip'] = ['ChineseCLIPModel', 'ChineseCLIPPreTrainedModel', 'ChineseCLIPTextModel', 'ChineseCLIPVisionModel'] if TYPE_CHECKING: from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/chinese_clip/configuration_chinese_clip.py """""" import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class ChineseCLIPTextConfig(PretrainedConfig): model_type = 'chinese_clip_text_model' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, 
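# Hedged sketch of the prompt expansion done in ChameleonProcessor.__call__ above: each image
# placeholder in the text is replaced by a start marker, image_seq_length copies of the image token,
# and an end marker. The literal strings "<image>", "<racm3:break>" and "<eoss>" are assumptions used
# for illustration; the processor takes them from its configuration.
image_token, image_start, image_end = "<image>", "<racm3:break>", "<eoss>"
image_seq_length = 4  # the real processor defaults to 1024

prompt = "Describe this image: <image>"
one_img_tokens = image_start + image_token * image_seq_length + image_end
print(prompt.replace(image_token, one_img_tokens))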
intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, initializer_factor=1.0, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, **kwargs): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'chinese_clip': config_dict = config_dict['text_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class ChineseCLIPVisionConfig(PretrainedConfig): model_type = 'chinese_clip_vision_model' def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'chinese_clip': config_dict = config_dict['vision_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
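# Hedged usage sketch for the from_pretrained override above: pointed at a full ChineseCLIP checkpoint
# (model_type "chinese_clip"), the text config class extracts the nested "text_config" section. The
# checkpoint id is the one used for documentation later in this file; loading it requires network access.
from transformers import ChineseCLIPTextConfig

text_config = ChineseCLIPTextConfig.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
print(text_config.model_type)  # "chinese_clip_text_model"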
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class ChineseCLIPConfig(PretrainedConfig): model_type = 'chinese_clip' def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs): text_config_dict = kwargs.pop('text_config_dict', None) vision_config_dict = kwargs.pop('vision_config_dict', None) super().__init__(**kwargs) if text_config_dict is not None: if text_config is None: text_config = {} _text_config_dict = ChineseCLIPTextConfig(**text_config_dict).to_dict() for (key, value) in _text_config_dict.items(): if key in text_config and value != text_config[key] and (key not in ['transformers_version']): if key in text_config_dict: message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["{key}"]` will be used instead.' else: message = f'`text_config_dict` is provided which will be used to initialize `ChineseCLIPTextConfig`. The value `text_config["{key}"]` will be overridden.' logger.info(message) text_config.update(_text_config_dict) if vision_config_dict is not None: if vision_config is None: vision_config = {} _vision_config_dict = ChineseCLIPVisionConfig(**vision_config_dict).to_dict() if 'id2label' in _vision_config_dict: _vision_config_dict['id2label'] = {str(key): value for (key, value) in _vision_config_dict['id2label'].items()} for (key, value) in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and (key not in ['transformers_version']): if key in vision_config_dict: message = f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different values. The value `vision_config_dict["{key}"]` will be used instead.' else: message = f'`vision_config_dict` is provided which will be used to initialize `ChineseCLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.' logger.info(message) vision_config.update(_vision_config_dict) if text_config is None: text_config = {} logger.info('`text_config` is `None`. Initializing the `ChineseCLIPTextConfig` with default values.') if vision_config is None: vision_config = {} logger.info('`vision_config` is `None`. 
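# Hedged sketch: a composite ChineseCLIPConfig can also be built from the two sub-configs via
# from_text_vision_configs (defined just below). The sizes here are toy values chosen only to keep
# the example light.
from transformers import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig

text_cfg = ChineseCLIPTextConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
vision_cfg = ChineseCLIPVisionConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
config = ChineseCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=128)
print(config.projection_dim, config.text_config.hidden_size)  # 128 256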
initializing the `ChineseCLIPVisionConfig` with default values.') self.text_config = ChineseCLIPTextConfig(**text_config) self.vision_config = ChineseCLIPVisionConfig(**vision_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = 1.0 self.initializer_range = 0.02 @classmethod def from_text_vision_configs(cls, text_config: ChineseCLIPTextConfig, vision_config: ChineseCLIPVisionConfig, **kwargs): return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) class ChineseCLIPOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('attention_mask', {0: 'batch', 1: 'sequence'})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('logits_per_image', {0: 'batch'}), ('logits_per_text', {0: 'batch'}), ('text_embeds', {0: 'batch'}), ('image_embeds', {0: 'batch'})]) @property def atol_for_validation(self) -> float: return 0.0001 def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=-1, seq_length: int=-1, framework: Optional['TensorType']=None) -> Mapping[str, Any]: text_input_dict = super().generate_dummy_inputs(processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework) image_input_dict = super().generate_dummy_inputs(processor.image_processor, batch_size=batch_size, framework=framework) return {**text_input_dict, **image_input_dict} @property def default_onnx_opset(self) -> int: return 14 # File: transformers-main/src/transformers/models/chinese_clip/convert_chinese_clip_original_pytorch_to_hf.py import argparse import torch from transformers import ChineseCLIPConfig, ChineseCLIPModel def copy_attn_layer(hf_attn_layer, pt_weights, prefix): (q_proj, k_proj, v_proj) = pt_weights[f'{prefix}.in_proj_weight'].chunk(3, dim=0) (q_proj_bias, k_proj_bias, v_proj_bias) = pt_weights[f'{prefix}.in_proj_bias'].chunk(3, dim=0) out_proj_weights = pt_weights[f'{prefix}.out_proj.weight'] out_proj_bias = pt_weights[f'{prefix}.out_proj.bias'] hf_attn_layer.q_proj.weight.data = q_proj hf_attn_layer.q_proj.bias.data = q_proj_bias hf_attn_layer.k_proj.weight.data = k_proj hf_attn_layer.k_proj.bias.data = k_proj_bias hf_attn_layer.v_proj.weight.data = v_proj hf_attn_layer.v_proj.bias.data = v_proj_bias hf_attn_layer.out_proj.weight.data = out_proj_weights hf_attn_layer.out_proj.bias.data = out_proj_bias def copy_mlp(hf_mlp, pt_weights, prefix): copy_linear(hf_mlp.fc1, pt_weights, f'{prefix}.c_fc') copy_linear(hf_mlp.fc2, pt_weights, f'{prefix}.c_proj') def copy_linear(hf_linear, pt_weights, prefix): hf_linear.weight.data = pt_weights[f'{prefix}.weight'].data hf_linear.bias.data = pt_weights[f'{prefix}.bias'].data def copy_layer(hf_layer, pt_weights, prefix): copy_linear(hf_layer.layer_norm1, pt_weights, f'{prefix}.ln_1') copy_linear(hf_layer.layer_norm2, pt_weights, f'{prefix}.ln_2') copy_mlp(hf_layer.mlp, pt_weights, f'{prefix}.mlp') copy_attn_layer(hf_layer.self_attn, pt_weights, f'{prefix}.attn') def copy_layers(hf_layers, pt_weights, prefix): for (layer_id, hf_layer) in enumerate(hf_layers): copy_layer(hf_layer, pt_weights, f'{prefix}.{layer_id}') def copy_text_model_and_projection(hf_model, pt_weights): hf_model.text_projection.weight.data = pt_weights['text_projection'].data.T for (name, param) in hf_model.text_model.named_parameters(): param.data = 
pt_weights[f'bert.{name}'].data def copy_vision_model_and_projection(hf_model, pt_weights): hf_model.visual_projection.weight.data = pt_weights['visual.proj'].data.T copy_linear(hf_model.vision_model.pre_layrnorm, pt_weights, 'visual.ln_pre') copy_linear(hf_model.vision_model.post_layernorm, pt_weights, 'visual.ln_post') hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_weights['visual.conv1.weight'].data hf_model.vision_model.embeddings.class_embedding.data = pt_weights['visual.class_embedding'].data hf_model.vision_model.embeddings.position_embedding.weight.data = pt_weights['visual.positional_embedding'].data copy_layers(hf_model.vision_model.encoder.layers, pt_weights, 'visual.transformer.resblocks') @torch.no_grad() def convert_chinese_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None): assert config_path is not None, 'Please specify the ChineseCLIP model config of the corresponding model size.' config = ChineseCLIPConfig.from_pretrained(config_path) hf_model = ChineseCLIPModel(config).eval() pt_weights = torch.load(checkpoint_path, map_location='cpu')['state_dict'] pt_weights = {name[7:] if name.startswith('module.') else name: value for (name, value) in pt_weights.items()} copy_text_model_and_projection(hf_model, pt_weights) copy_vision_model_and_projection(hf_model, pt_weights) hf_model.logit_scale.data = pt_weights['logit_scale'].data hf_model.save_pretrained(pytorch_dump_folder_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output folder storing converted hf PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to original github format ChineseCLIP checkpoint.') parser.add_argument('--config_path', default=None, required=True, type=str, help='Path to hf config.json of model to convert.') args = parser.parse_args() convert_chinese_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) print('The conversion is finished!') # File: transformers-main/src/transformers/models/chinese_clip/feature_extraction_chinese_clip.py """""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor logger = logging.get_logger(__name__) class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. 
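# A minimal sketch of the weight split used by copy_attn_layer above: the original checkpoint stores
# the query/key/value projections as one fused (3 * dim, dim) matrix, which .chunk(3, dim=0) splits
# back into three (dim, dim) matrices. dim=8 is a toy size.
import torch

dim = 8
in_proj_weight = torch.randn(3 * dim, dim)
q_w, k_w, v_w = in_proj_weight.chunk(3, dim=0)
print(q_w.shape, k_w.shape, v_w.shape)  # each torch.Size([8, 8])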
Please use ChineseCLIPImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs) # File: transformers-main/src/transformers/models/chinese_clip/image_processing_chinese_clip.py """""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, get_resize_output_image_size, resize, to_channel_dimension_format from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): import PIL class ChineseCLIPImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Dict[str, int]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224} crop_size = get_size_dict(crop_size) self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size, default_to_square=False) output_size = get_resize_output_image_size(image, size=(size['height'], size['width']), default_to_square=False, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: int=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if 
do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) all_images = [] for image in images: if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) all_images.append(image) images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors) # File: transformers-main/src/transformers/models/chinese_clip/modeling_chinese_clip.py """""" import math from dataclasses import dataclass from typing import Any, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'OFA-Sys/chinese-clip-vit-base-patch16' _CONFIG_FOR_DOC = 'ChineseCLIPConfig' def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = 
contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass class ChineseCLIPOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) class ChineseCLIPTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class ChineseCLIPVisionEmbeddings(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False) def forward(self, 
pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class ChineseCLIPTextSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, 
key_length) = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class ChineseCLIPTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states CHINESE_CLIP_TEXT_SELF_ATTENTION_CLASSES = {'eager': ChineseCLIPTextSelfAttention} class ChineseCLIPTextAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = CHINESE_CLIP_TEXT_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = ChineseCLIPTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, 
hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class ChineseCLIPVisionAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, tgt_len, embed_dim) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped) class ChineseCLIPTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = 
ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class ChineseCLIPTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class ChineseCLIPVisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class ChineseCLIPTextLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ChineseCLIPTextAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type='absolute') self.intermediate = ChineseCLIPTextIntermediate(config) self.output = ChineseCLIPTextOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + 
cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class ChineseCLIPVisionLayer(nn.Module): def __init__(self, config: ChineseCLIPConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = ChineseCLIPVisionAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = ChineseCLIPVisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class ChineseCLIPTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class ChineseCLIPPreTrainedModel(PreTrainedModel): config_class = ChineseCLIPConfig base_model_prefix = 'chinese_clip' supports_gradient_checkpointing = True def _init_weights(self, module): factor = self.config.initializer_factor if isinstance(module, ChineseCLIPVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim ** (-0.5) * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, ChineseCLIPTextEmbeddings): nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range) for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]: if embedding.padding_idx is not None: embedding.weight.data[embedding.padding_idx].zero_() elif isinstance(module, ChineseCLIPVisionAttention): factor = self.config.initializer_factor in_proj_std = module.embed_dim ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor out_proj_std = module.embed_dim ** (-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, ChineseCLIPVisionMLP): factor = self.config.initializer_factor in_proj_std = 
module.config.hidden_size ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor fc_std = (2 * module.config.hidden_size) ** (-0.5) * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, ChineseCLIPModel): nn.init.normal_(module.text_projection.weight, std=module.text_embed_dim ** (-0.5) * self.config.initializer_factor) nn.init.normal_(module.visual_projection.weight, std=module.vision_embed_dim ** (-0.5) * self.config.initializer_factor) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() CHINESE_CLIP_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
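# Initialization-scale sketch: `_init_weights` scales the vision attention and MLP stds by
# depth, in the CLIP style shown above. Plugging in ViT-B-like sizes (hidden_size=768,
# 12 layers, initializer_factor=1.0 -- illustrative values, not read from a real config):
hidden_size, num_layers, factor = 768, 12, 1.0
in_proj_std = hidden_size ** -0.5 * (2 * num_layers) ** -0.5 * factor   # ~0.0074 (q/k/v)
out_proj_std = hidden_size ** -0.5 * factor                             # ~0.0361 (out_proj)
fc_std = (2 * hidden_size) ** -0.5 * factor                             # ~0.0255 (fc1)
print(f"{in_proj_std:.4f} {out_proj_std:.4f} {fc_std:.4f}")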
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" CHINESE_CLIP_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' CHINESE_CLIP_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details.\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class ChineseCLIPTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class ChineseCLIPVisionEncoder(nn.Module): def __init__(self, config: ChineseCLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = 
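# Text-encoder sketch: `ChineseCLIPTextEncoder` collects one hidden state per layer (plus the
# embedding output) and, when gradient checkpointing is enabled during training, warns and
# forces `use_cache=False`. Minimal check on a randomly initialized text model built from the
# default config (no pretrained weights are assumed):
import torch
from transformers import ChineseCLIPTextConfig, ChineseCLIPTextModel

config = ChineseCLIPTextConfig()
model = ChineseCLIPTextModel(config).eval()
model.gradient_checkpointing_enable()              # sets `gradient_checkpointing = True` on the encoder
input_ids = torch.randint(0, config.vocab_size, (1, 12))
with torch.no_grad():
    outputs = model(input_ids=input_ids, output_hidden_states=True)
print(len(outputs.hidden_states))                  # num_hidden_layers + 1 (13 with the defaults)
print(outputs.last_hidden_state.shape)             # (1, 12, hidden_size)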
return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, output_attentions) else: layer_outputs = encoder_layer(hidden_states, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class ChineseCLIPVisionTransformer(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = ChineseCLIPVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = ChineseCLIPVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('The text model from CHINESE_CLIP without any head or projection on top.', CHINESE_CLIP_START_DOCSTRING) class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel): config_class = ChineseCLIPTextConfig _no_split_modules = ['ChineseCLIPTextEmbeddings'] def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = ChineseCLIPTextEmbeddings(config) self.encoder = ChineseCLIPTextEncoder(config) self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): 
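# Vision-tower sketch: `ChineseCLIPVisionTransformer` pre-norms the patch embeddings, runs the
# encoder, then pools by taking the class token and applying `post_layernorm`. Shape check on a
# randomly initialized model with the default vision config (illustrative, no pretrained weights):
import torch
from transformers import ChineseCLIPVisionConfig, ChineseCLIPVisionModel

config = ChineseCLIPVisionConfig()
model = ChineseCLIPVisionModel(config).eval()
pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
with torch.no_grad():
    out = model(pixel_values=pixel_values)
print(out.last_hidden_state.shape)   # (1, num_patches + 1, hidden_size) -- sequence includes the CLS token
print(out.pooler_output.shape)       # (1, hidden_size) -- post-layernormed CLS embedding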
self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = self.encoder(embedding_output, 
attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('The vision model from CHINESE_CLIP without any head or projection on top.', CHINESE_CLIP_START_DOCSTRING) class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel): config_class = ChineseCLIPVisionConfig main_input_name = 'pixel_values' _no_split_modules = ['ChineseCLIPVisionEmbeddings', 'ChineseCLIPVisionAttention'] def __init__(self, config: ChineseCLIPVisionConfig): super().__init__(config) self.vision_model = ChineseCLIPVisionTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) @add_start_docstrings(CHINESE_CLIP_START_DOCSTRING) class ChineseCLIPModel(ChineseCLIPPreTrainedModel): config_class = ChineseCLIPConfig def __init__(self, config: ChineseCLIPConfig): super().__init__(config) if not isinstance(config.text_config, ChineseCLIPTextConfig): raise TypeError(f'config.text_config is expected to be of type ChineseCLIPTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.vision_config, ChineseCLIPVisionConfig): raise TypeError(f'config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type {type(config.vision_config)}.') text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False) self.vision_model = ChineseCLIPVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.post_init() @add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING) def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: 
Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = text_outputs[0][:, 0, :] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = vision_outputs[1] image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig) def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, ChineseCLIPOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[0][:, 0, :] text_embeds = self.text_projection(text_embeds) image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = 
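# Similarity sketch: `ChineseCLIPModel.forward` L2-normalizes both projected embeddings, scales
# by `logit_scale.exp()`, and takes text @ image^T. The same computation on random features;
# the projection width of 512 and the CLIP-style scale ln(1/0.07) ~= 2.6592 are illustrative
# assumptions rather than values read from a checkpoint:
import torch

text_embeds = torch.randn(4, 512)          # e.g. four candidate captions
image_embeds = torch.randn(1, 512)         # one image
logit_scale = torch.tensor(2.6592).exp()

text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
logits_per_text = logit_scale * text_embeds @ image_embeds.t()   # (4, 1)
logits_per_image = logits_per_text.t()                           # (1, 4)
print(logits_per_image.softmax(dim=-1))                          # zero-shot label probabilities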
chinese_clip_loss(logits_per_text) if not return_dict: pooled_output = text_outputs[1] if pooled_output is None: text_outputs = (text_outputs[0],) + text_outputs[2:] output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return (loss,) + output if loss is not None else output return ChineseCLIPOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) # File: transformers-main/src/transformers/models/chinese_clip/processing_chinese_clip.py """""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class ChineseCLIPProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] image_processor_class = 'ChineseCLIPImageProcessor' tokenizer_class = ('BertTokenizer', 'BertTokenizerFast') def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if 'feature_extractor' in kwargs: warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning) feature_extractor = kwargs.pop('feature_extractor') image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor def __call__(self, text=None, images=None, return_tensors=None, **kwargs): if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.') if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) if text is not None and images is not None: encoding['pixel_values'] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def feature_extractor_class(self): warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. 
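# Processor usage sketch: `ChineseCLIPProcessor.__call__` tokenizes the text, runs the image
# processor, and merges `pixel_values` into the text encoding so the result can be fed straight
# to `ChineseCLIPModel`. The checkpoint name and the blank placeholder image are assumptions
# for illustration:
import torch
from PIL import Image
from transformers import ChineseCLIPModel, ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["杰尼龟", "皮卡丘"], images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits_per_image.softmax(dim=-1))   # probabilities over the two captions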
Use `image_processor_class` instead.', FutureWarning) return self.image_processor_class # File: transformers-main/src/transformers/models/clap/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_clap': ['ClapAudioConfig', 'ClapConfig', 'ClapTextConfig'], 'processing_clap': ['ClapProcessor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_clap'] = ['ClapModel', 'ClapPreTrainedModel', 'ClapTextModel', 'ClapTextModelWithProjection', 'ClapAudioModel', 'ClapAudioModelWithProjection'] _import_structure['feature_extraction_clap'] = ['ClapFeatureExtractor'] if TYPE_CHECKING: from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/clap/configuration_clap.py """""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class ClapTextConfig(PretrainedConfig): model_type = 'clap_text_model' def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_factor=1.0, layer_norm_eps=1e-12, projection_dim=512, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, projection_hidden_act='relu', **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.projection_hidden_act = projection_hidden_act self.projection_dim = projection_dim @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'clap': config_dict = config_dict['text_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class ClapAudioConfig(PretrainedConfig): model_type = 'clap_audio_model' def __init__(self, window_size=8, num_mel_bins=64, spec_size=256, hidden_act='gelu', patch_size=4, patch_stride=[4, 4], num_classes=527, hidden_size=768, projection_dim=512, depths=[2, 2, 6, 2], num_attention_heads=[4, 8, 16, 32], enable_fusion=False, hidden_dropout_prob=0.1, fusion_type=None, patch_embed_input_channels=1, flatten_patch_embeds=True, patch_embeds_hidden_size=96, enable_patch_layer_norm=True, drop_path_rate=0.0, attention_probs_dropout_prob=0.0, qkv_bias=True, mlp_ratio=4.0, aff_block_r=4, num_hidden_layers=4, projection_hidden_act='relu', layer_norm_eps=1e-05, initializer_factor=1.0, **kwargs): super().__init__(**kwargs) self.window_size = window_size self.num_mel_bins = num_mel_bins self.spec_size = spec_size self.patch_size = patch_size self.patch_stride = patch_stride self.num_classes = num_classes self.hidden_size = hidden_size self.depths = depths self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.window_size = window_size self.enable_fusion = enable_fusion self.fusion_type = fusion_type self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.projection_dim = projection_dim self.flatten_patch_embeds = flatten_patch_embeds self.patch_embeds_hidden_size = patch_embeds_hidden_size self.enable_patch_layer_norm = enable_patch_layer_norm self.drop_path_rate = drop_path_rate self.attention_probs_dropout_prob = attention_probs_dropout_prob self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.patch_embed_input_channels = patch_embed_input_channels self.aff_block_r = aff_block_r self.layer_norm_eps = layer_norm_eps self.initializer_factor = initializer_factor self.projection_hidden_act = projection_hidden_act @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'clap': config_dict = config_dict['audio_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class ClapConfig(PretrainedConfig): model_type = 'clap' def __init__(self, text_config=None, audio_config=None, logit_scale_init_value=1 / 0.07, projection_dim=512, projection_hidden_act='relu', initializer_factor=1.0, **kwargs): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info('text_config is None. Initializing the ClapTextConfig with default values.') if audio_config is None: audio_config = {} logger.info('audio_config is None. 
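# Config-composition sketch: `ClapConfig` accepts the sub-configs as dicts, copies
# `projection_dim` / `projection_hidden_act` into both of them, and derives `num_hidden_layers`
# from the text depth plus the number of audio stages. Built here from the default sub-configs
# (the printed values are whatever those defaults resolve to):
from transformers import ClapAudioConfig, ClapConfig, ClapTextConfig

config = ClapConfig(
    text_config=ClapTextConfig().to_dict(),
    audio_config=ClapAudioConfig().to_dict(),
    projection_dim=512,
)
print(config.text_config.projection_dim, config.audio_config.projection_dim)  # both 512
print(config.num_hidden_layers)   # text num_hidden_layers + len(audio depths), 12 + 4 with defaults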
initializing the ClapAudioConfig with default values.') self.text_config = ClapTextConfig(**text_config) self.audio_config = ClapAudioConfig(**audio_config) self.text_config.projection_dim = projection_dim self.audio_config.projection_dim = projection_dim self.text_config.projection_hidden_act = projection_hidden_act self.audio_config.projection_hidden_act = projection_hidden_act self.projection_dim = projection_dim self.projection_hidden_act = projection_hidden_act self.hidden_size = self.text_config.hidden_size self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = initializer_factor self.num_hidden_layers = self.text_config.num_hidden_layers + len(self.audio_config.depths) @classmethod def from_text_audio_configs(cls, text_config: ClapTextConfig, audio_config: ClapAudioConfig, **kwargs): return cls(text_config=text_config.to_dict(), audio_config=audio_config.to_dict(), **kwargs) # File: transformers-main/src/transformers/models/clap/convert_clap_original_pytorch_to_hf.py import argparse import re from laion_clap import CLAP_Module from transformers import AutoFeatureExtractor, ClapConfig, ClapModel KEYS_TO_MODIFY_MAPPING = {'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm'} processor = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def init_clap(checkpoint_path, model_type, enable_fusion=False): model = CLAP_Module(amodel=model_type, enable_fusion=enable_fusion) model.load_ckpt(checkpoint_path) return model def get_config_from_original(clap_model): audio_config = {'patch_embeds_hidden_size': clap_model.model.audio_branch.embed_dim, 'depths': clap_model.model.audio_branch.depths, 'hidden_size': clap_model.model.audio_projection[0].in_features} text_config = {'hidden_size': clap_model.model.text_branch.pooler.dense.in_features} return ClapConfig(audio_config=audio_config, text_config=text_config) def rename_state_dict(state_dict): model_state_dict = {} sequential_layers_pattern = '.*sequential.(\\d+).*' text_projection_pattern = '.*_projection.(\\d+).*' for (key, value) in state_dict.items(): for (key_to_modify, new_key) in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) if re.match(sequential_layers_pattern, key): sequential_layer = re.match(sequential_layers_pattern, key).group(1) key = key.replace(f'sequential.{sequential_layer}.', f'layers.{int(sequential_layer) // 3}.linear.') elif re.match(text_projection_pattern, key): projecton_layer = int(re.match(text_projection_pattern, key).group(1)) transformers_projection_layer = 1 if projecton_layer == 0 else 2 key = key.replace(f'_projection.{projecton_layer}.', f'_projection.linear{transformers_projection_layer}.') if 'audio' and 'qkv' in key: mixed_qkv = value qkv_dim = mixed_qkv.size(0) // 3 query_layer = mixed_qkv[:qkv_dim] key_layer = mixed_qkv[qkv_dim:qkv_dim * 2] value_layer = mixed_qkv[qkv_dim * 2:] model_state_dict[key.replace('qkv', 'query')] = query_layer model_state_dict[key.replace('qkv', 'key')] = key_layer model_state_dict[key.replace('qkv', 'value')] = value_layer else: model_state_dict[key] = value return model_state_dict def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, model_type, enable_fusion=False): clap_model = 
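# Checkpoint-conversion sketch: `rename_state_dict` splits the original fused `qkv` projection
# of the audio branch into the separate query/key/value weights expected by
# `ClapAudioSelfAttention`. The same slicing on a toy tensor (sizes are illustrative):
import torch

hidden = 8
fused_qkv_weight = torch.randn(3 * hidden, hidden)     # rows stacked as [query; key; value]
qkv_dim = fused_qkv_weight.size(0) // 3
query_weight = fused_qkv_weight[:qkv_dim]
key_weight = fused_qkv_weight[qkv_dim:2 * qkv_dim]
value_weight = fused_qkv_weight[2 * qkv_dim:]
assert query_weight.shape == key_weight.shape == value_weight.shape == (hidden, hidden)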
init_clap(checkpoint_path, model_type, enable_fusion=enable_fusion) clap_model.eval() state_dict = clap_model.model.state_dict() state_dict = rename_state_dict(state_dict) transformers_config = get_config_from_original(clap_model) transformers_config.audio_config.enable_fusion = enable_fusion model = ClapModel(transformers_config) model.load_state_dict(state_dict, strict=False) model.save_pretrained(pytorch_dump_folder_path) transformers_config.save_pretrained(pytorch_dump_folder_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') parser.add_argument('--model_type', default='HTSAT-tiny', type=str, help='Whether to enable fusion or not') args = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.model_type, args.enable_fusion) # File: transformers-main/src/transformers/models/clap/feature_extraction_clap.py """""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging logger = logging.get_logger(__name__) class ClapFeatureExtractor(SequenceFeatureExtractor): model_input_names = ['input_features', 'is_longer'] def __init__(self, feature_size=64, sampling_rate=48000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float=0, frequency_max: float=14000, top_db: int=None, truncation: str='fusion', padding: str='repeatpad', **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs) self.top_db = top_db self.truncation = truncation self.padding = padding self.fft_window_size = fft_window_size self.nb_frequency_bins = (fft_window_size >> 1) + 1 self.hop_length = hop_length self.max_length_s = max_length_s self.nb_max_samples = max_length_s * sampling_rate self.sampling_rate = sampling_rate self.frequency_min = frequency_min self.frequency_max = frequency_max self.mel_filters = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale='htk') self.mel_filters_slaney = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney') def to_dict(self) -> Dict[str, Any]: output = copy.deepcopy(self.__dict__) output['feature_extractor_type'] = self.__class__.__name__ if 'mel_filters' in output: del output['mel_filters'] if 'mel_filters_slaney' in output: del output['mel_filters_slaney'] return output def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array]=None) -> np.ndarray: log_mel_spectrogram = spectrogram(waveform, 
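# Feature-extraction sketch: the extractor precomputes two mel filter banks (HTK-scale for the
# 'fusion' path, Slaney-normalized for 'rand_trunc') and turns a waveform into a
# (num_frames, 64) log-mel spectrogram. The same pipeline, reproduced with the building blocks
# used above on one second of 48 kHz noise (illustrative input, not real audio):
import numpy as np
from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

sampling_rate, fft_window_size, hop_length, feature_size = 48000, 1024, 480, 64
mel_filters = mel_filter_bank(
    num_frequency_bins=(fft_window_size >> 1) + 1,
    num_mel_filters=feature_size,
    min_frequency=0,
    max_frequency=14000,
    sampling_rate=sampling_rate,
    norm=None,
    mel_scale="htk",
)
waveform = np.random.randn(sampling_rate).astype(np.float64)   # 1 s of noise
log_mel = spectrogram(
    waveform,
    window_function(fft_window_size, "hann"),
    frame_length=fft_window_size,
    hop_length=hop_length,
    power=2.0,
    mel_filters=mel_filters,
    log_mel="dB",
).T
print(log_mel.shape)   # (num_frames, 64)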
window_function(self.fft_window_size, 'hann'), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='dB') return log_mel_spectrogram.T def _random_mel_fusion(self, mel, total_frames, chunk_frames): ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3) if len(ranges[1]) == 0: ranges[1] = [0] if len(ranges[2]) == 0: ranges[2] = [0] idx_front = np.random.choice(ranges[0]) idx_middle = np.random.choice(ranges[1]) idx_back = np.random.choice(ranges[2]) mel_chunk_front = mel[idx_front:idx_front + chunk_frames, :] mel_chunk_middle = mel[idx_middle:idx_middle + chunk_frames, :] mel_chunk_back = mel[idx_back:idx_back + chunk_frames, :] mel = torch.tensor(mel[None, None, :]) mel_shrink = torch.nn.functional.interpolate(mel, size=[chunk_frames, 64], mode='bilinear', align_corners=False) mel_shrink = mel_shrink[0][0].numpy() mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0) return mel_fusion def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array: if waveform.shape[0] > max_length: if truncation == 'rand_trunc': longer = True overflow = len(waveform) - max_length idx = np.random.randint(0, overflow + 1) waveform = waveform[idx:idx + max_length] input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :] elif truncation == 'fusion': mel = self._np_extract_fbank_features(waveform, self.mel_filters) chunk_frames = max_length // self.hop_length + 1 total_frames = mel.shape[0] if chunk_frames == total_frames: input_mel = np.stack([mel, mel, mel, mel], axis=0) longer = False else: input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames) longer = True else: raise NotImplementedError(f'data_truncating {truncation} not implemented') else: longer = False if waveform.shape[0] < max_length: if padding == 'repeat': n_repeat = int(max_length / len(waveform)) waveform = np.tile(waveform, n_repeat + 1)[:max_length] if padding == 'repeatpad': n_repeat = int(max_length / len(waveform)) waveform = np.tile(waveform, n_repeat) waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='constant', constant_values=0) if truncation == 'fusion': input_mel = self._np_extract_fbank_features(waveform, self.mel_filters) input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0) else: input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :] return (input_mel, longer) def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str=None, padding: Optional[str]=None, max_length: Optional[int]=None, sampling_rate: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchFeature: truncation = truncation if truncation is not None else self.truncation padding = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError(f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning('It is strongly recommended to pass the `sampling_rate` argument to this function. 
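# Padding-mode sketch: for clips shorter than `nb_max_samples`, 'repeat' tiles the waveform past
# the target length and truncates, while 'repeatpad' tiles by the integer ratio and zero-pads
# the remainder -- mirroring the branches in `_get_input_mel` on a toy 5-sample clip padded to 12:
import numpy as np

waveform = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
max_length = 12
n_repeat = int(max_length / len(waveform))                       # 2

repeat = np.tile(waveform, n_repeat + 1)[:max_length]            # [1..5, 1..5, 1, 2]
repeatpad = np.pad(
    np.tile(waveform, n_repeat),
    (0, max_length - n_repeat * len(waveform)),
    mode="constant",
    constant_values=0,
)                                                                # [1..5, 1..5, 0, 0]
print(repeat)
print(repeatpad)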
Failing to do so can result in silent errors that might be hard to debug.') is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}') is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list))) if is_batched: raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech] elif not is_batched and (not isinstance(raw_speech, np.ndarray)): raw_speech = np.asarray(raw_speech, dtype=np.float64) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float64) if not is_batched: raw_speech = [np.asarray(raw_speech)] padded_inputs = [self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding) for waveform in raw_speech] input_mel = [] is_longer = [] for (mel, longer) in padded_inputs: input_mel.append(mel) is_longer.append(longer) if truncation == 'fusion' and sum(is_longer) == 0: rand_idx = np.random.randint(0, len(input_mel)) is_longer[rand_idx] = True if isinstance(input_mel[0], List): input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel] is_longer = [[longer] for longer in is_longer] input_features = {'input_features': input_mel, 'is_longer': is_longer} input_features = BatchFeature(input_features) if return_tensors is not None: input_features = input_features.convert_to_tensors(return_tensors) return input_features # File: transformers-main/src/transformers/models/clap/modeling_clap.py """""" import collections import math from dataclasses import dataclass from typing import Any, List, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'laion/clap-htsat-fused' def interpolate(hidden_states, ratio): (batch_size, time_length, classes_num) = hidden_states.shape upsampled = hidden_states[:, :, None, :].repeat(1, 1, ratio, 1) upsampled = upsampled.reshape(batch_size, time_length * ratio, classes_num) return upsampled def window_partition(hidden_states, window_size): (batch_size, height, width, num_channels) = hidden_states.shape hidden_states = hidden_states.view(batch_size, height // window_size, window_size, width // window_size, window_size, num_channels) windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows def window_reverse(windows, window_size, height, width): num_channels = windows.shape[-1] windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels) windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels) return windows def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): mask = 
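# Window-partition sketch: `window_partition` cuts a (batch, height, width, channels) feature
# map into non-overlapping window_size x window_size tiles and `window_reverse` undoes it, so
# the pair round-trips exactly. Toy check using the functions defined in this module (assuming
# it is importable as `transformers.models.clap.modeling_clap`):
import torch
from transformers.models.clap.modeling_clap import window_partition, window_reverse

hidden_states = torch.arange(1 * 8 * 8 * 3, dtype=torch.float32).reshape(1, 8, 8, 3)
windows = window_partition(hidden_states, window_size=4)          # (num_windows * batch, 4, 4, 3)
restored = window_reverse(windows, window_size=4, height=8, width=8)
assert windows.shape == (4, 4, 4, 3)
assert torch.equal(restored, hidden_states)                       # lossless round trip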
input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: labels = torch.arange(len(logits), device=logits.device) return nn.functional.cross_entropy(logits, labels) @dataclass class ClapTextModelOutput(ModelOutput): text_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class ClapAudioModelOutput(ModelOutput): audio_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class ClapOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits_per_audio: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None audio_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None audio_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'audio_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) class ClapDropPath(nn.Module): def __init__(self, drop_prob=None): super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states): if self.drop_prob == 0.0 or not self.training: return hidden_states keep_prob = 1 - self.drop_prob shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device) random_tensor.floor_() output = hidden_states.div(keep_prob) * random_tensor return output class ClapAudioAFFBlock(nn.Module): def __init__(self, config: ClapAudioConfig): super().__init__() channels = config.patch_embeds_hidden_size downsize_ratio = config.aff_block_r inter_channels = int(channels // downsize_ratio) self.local_att = nn.Sequential(nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(inter_channels), nn.ReLU(inplace=True), nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(channels)) self.global_att = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(inter_channels), nn.ReLU(inplace=True), nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(channels)) self.sigmoid = nn.Sigmoid() def forward(self, hidden_states, residual): attention_input = hidden_states + residual fused_layer_output = self.local_att(attention_input) + self.global_att(attention_input) fused_layer_output = self.sigmoid(fused_layer_output) output = 2 * hidden_states * fused_layer_output + 2 * residual * (1 - fused_layer_output) return output class ClapAudioPatchEmbed(nn.Module): def __init__(self, config: ClapAudioConfig): super().__init__() img_size = (config.spec_size, config.spec_size) if isinstance(config.spec_size, int) else config.spec_size patch_size = (config.patch_size, config.patch_size) if isinstance(config.patch_size, int) else config.patch_size patch_stride = (config.patch_stride, config.patch_stride) if isinstance(config.patch_stride, int) else config.patch_stride self.img_size = img_size self.patch_stride = patch_stride self.grid_size = 
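# Contrastive-loss sketch: `contrastive_loss` is cross-entropy against the diagonal, i.e. the
# i-th text should match the i-th audio clip. CLAP-style training typically averages the loss
# over both directions of the similarity matrix; that symmetric average is a common CLIP-style
# recipe stated here as an assumption, not copied from this file:
import torch
from torch import nn

def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    labels = torch.arange(len(logits), device=logits.device)
    return nn.functional.cross_entropy(logits, labels)

logits_per_text = torch.randn(4, 4)               # 4 texts x 4 audio clips
loss = (contrastive_loss(logits_per_text) + contrastive_loss(logits_per_text.t())) / 2.0
print(loss)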
(img_size[0] // patch_stride[0], img_size[1] // patch_stride[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.flatten = config.flatten_patch_embeds self.enable_fusion = config.enable_fusion padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2) scale_factor = 4 if self.enable_fusion and config.fusion_type == 'channel_map' else 1 self.proj = nn.Conv2d(config.patch_embed_input_channels * scale_factor, config.patch_embeds_hidden_size, kernel_size=patch_size, stride=patch_stride, padding=padding) self.norm = nn.LayerNorm(config.patch_embeds_hidden_size) if config.enable_patch_layer_norm else nn.Identity() if self.enable_fusion: self.fusion_model = ClapAudioAFFBlock(config) self.mel_conv2d = nn.Conv2d(config.patch_embed_input_channels, config.patch_embeds_hidden_size, kernel_size=(patch_size[0], patch_size[1] * 3), stride=(patch_stride[0], patch_stride[1] * 3), padding=padding) def forward(self, hidden_states, is_longer_idx=None): if self.enable_fusion: global_hidden_states = hidden_states[:, 0:1, :, :] (batch_size, num_channels, height, width) = global_hidden_states.shape if height != self.img_size[0] or width != self.img_size[1]: raise ValueError(f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") global_hidden_states = self.proj(global_hidden_states) output_width = global_hidden_states.size(-1) if len(is_longer_idx) > 0: local_hidden_states = hidden_states[is_longer_idx, 1:, :, :].contiguous() (batch_size, num_channels, height, width) = local_hidden_states.shape local_hidden_states = local_hidden_states.view(batch_size * num_channels, 1, height, width) local_hidden_states = self.mel_conv2d(local_hidden_states) (_, features, height, width) = local_hidden_states.shape local_hidden_states = local_hidden_states.view(batch_size, num_channels, features, height, width) local_hidden_states = local_hidden_states.permute((0, 2, 3, 1, 4)).contiguous().flatten(3) local_width = local_hidden_states.size(-1) local_hidden_states = torch.nn.functional.pad(local_hidden_states, (0, output_width - local_width), 'constant', 0) global_hidden_states[is_longer_idx] = self.fusion_model(global_hidden_states[is_longer_idx], local_hidden_states) hidden_states = global_hidden_states else: (_, _, height, width) = hidden_states.shape if height != self.img_size[0] or width != self.img_size[1]: raise ValueError(f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") hidden_states = self.proj(hidden_states) if self.flatten: hidden_states = hidden_states.flatten(2).transpose(1, 2) hidden_states = self.norm(hidden_states) return hidden_states class ClapAudioSelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() if dim % num_heads != 0: raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})') self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)) coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing='ij')) coords_flatten = 
torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.register_buffer('relative_position_index', relative_position_index) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: (batch_size, dim, num_channels) = hidden_states.shape mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] relative_position_bias = relative_position_bias.view(self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view(batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim) attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class ClapAudioSelfOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class ClapAudioAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() self.self = ClapAudioSelfAttention(config, dim, num_heads, window_size) self.output = ClapAudioSelfOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: 
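# Relative-position-bias sketch: the self-attention keeps a ((2*Wh-1)*(2*Ww-1), num_heads)
# table and indexes it with `relative_position_index`, built as above. Reproduced for a tiny
# 2x2 window to show the index shape and value range (torch.meshgrid stands in for the
# `meshgrid` wrapper used in the module):
import torch

window_size = (2, 2)
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing="ij"))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += window_size[0] - 1
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
print(relative_position_index.shape)     # (4, 4): one table index per token pair in the window
print(relative_position_index.max())     # 8 == (2*Wh-1)*(2*Ww-1) - 1, the last table row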
return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class ClapAudioIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class ClapAudioOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class ClapAudioLayer(nn.Module): def __init__(self, config, dim, input_resolution, num_heads, shift_size=0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.shift_size = shift_size self.window_size = config.window_size self.input_resolution = input_resolution self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = ClapAudioAttention(config, dim, num_heads, window_size=self.window_size) self.drop_path = ClapDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = ClapAudioIntermediate(config, dim) self.output = ClapAudioOutput(config, dim) def set_shift_and_window_size(self, input_resolution): if min(input_resolution) <= self.window_size: self.shift_size = torch_int(0) self.window_size = torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution) def get_attn_mask(self, height, width, dtype, device): if self.shift_size > 0: img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device) height_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) width_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - 
mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, 0, pad_right, 0, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return (hidden_states, pad_values) def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> Tuple[torch.Tensor, torch.Tensor]: if not always_partition: self.set_shift_and_window_size(input_dimensions) else: pass (height, width) = input_dimensions (batch_size, _, channels) = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) hidden_states = hidden_states.view(batch_size, height, width, channels) (hidden_states, pad_values) = self.maybe_pad(hidden_states, height, width) (_, height_pad, width_pad, _) = hidden_states.shape if self.shift_size > 0: shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device) attention_outputs = self.attention(hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions) attention_output = attention_outputs[0] attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) if self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = shortcut + self.drop_path(attention_windows) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.output(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs class ClapAudioStage(nn.Module): def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): super().__init__() self.config = config self.dim = dim self.blocks = nn.ModuleList([ClapAudioLayer(config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, shift_size=0 if i % 2 == 0 else config.window_size // 2) for i in range(depth)]) if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> Tuple[torch.Tensor]: (height, width) = 
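# Illustrative sketch of the padding and cyclic-shift arithmetic used by ClapAudioLayer
# above. The window size and feature-map shape are toy values, not taken from the config;
# the point is that maybe_pad makes both spatial dims divisible by the window size and
# that the shift applied before window attention is exactly undone afterwards.
import torch

window_size = 8
height, width = 50, 62
pad_right = (window_size - width % window_size) % window_size    # 2  -> width becomes 64
pad_bottom = (window_size - height % window_size) % window_size  # 6  -> height becomes 56
x = torch.randn(1, height, width, 3)
x = torch.nn.functional.pad(x, (0, 0, 0, pad_right, 0, pad_bottom))
print(x.shape)  # torch.Size([1, 56, 64, 3]) -- both spatial dims now divisible by 8

# The cyclic shift used by every odd block (shift_size = window_size // 2) is a plain
# torch.roll; rolling back after attention restores the original layout exactly.
shift = window_size // 2
shifted = torch.roll(x, shifts=(-shift, -shift), dims=(1, 2))
restored = torch.roll(shifted, shifts=(shift, shift), dims=(1, 2))
print(torch.equal(restored, x))  # True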
input_dimensions for (i, layer_module) in enumerate(self.blocks): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: (height_downsampled, width_downsampled) = ((height + 1) // 2, (width + 1) // 2) output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs class ClapAudioPatchMerging(nn.Module): def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def maybe_pad(self, input_feature, height, width): should_pad = height % 2 == 1 or width % 2 == 1 if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: (height, width) = input_dimensions (batch_size, dim, num_channels) = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) input_feature = self.maybe_pad(input_feature, height, width) input_feature_0 = input_feature[:, 0::2, 0::2, :] input_feature_1 = input_feature[:, 1::2, 0::2, :] input_feature_2 = input_feature[:, 0::2, 1::2, :] input_feature_3 = input_feature[:, 1::2, 1::2, :] input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) input_feature = self.norm(input_feature) input_feature = self.reduction(input_feature) return input_feature class ClapAudioEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_layers = len(config.depths) self.config = config self.patch_embed = ClapAudioPatchEmbed(config) self.enable_fusion = config.enable_fusion self.patch_stride = self.patch_embed.patch_stride self.spec_size = config.spec_size self.freq_ratio = config.spec_size // config.num_mel_bins self.num_features = int(config.patch_embeds_hidden_size * 2 ** (self.num_layers - 1)) drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] grid_size = self.patch_embed.grid_size self.input_resolutions = [(grid_size[0] // 2 ** i, grid_size[1] // 2 ** i) for i in range(self.num_layers)] self.layers = nn.ModuleList([ClapAudioStage(config=config, dim=int(config.patch_embeds_hidden_size * 2 ** i_layer), input_resolution=self.input_resolutions[i_layer], depth=config.depths[i_layer], num_heads=config.num_attention_heads[i_layer], drop_path=drop_path_rate[sum(config.depths[:i_layer]):sum(config.depths[:i_layer + 1])], downsample=ClapAudioPatchMerging if i_layer < self.num_layers - 1 else None) for i_layer in range(self.num_layers)]) self.gradient_checkpointing = False self.batch_norm = nn.BatchNorm2d(config.num_mel_bins) self.norm = nn.LayerNorm(self.num_features) self.depths = config.depths self.avgpool = nn.AdaptiveAvgPool1d(1) def 
reshape_mel2img(self, normalized_input_features): (_, _, time_length, freq_length) = normalized_input_features.shape spec_width = int(self.spec_size * self.freq_ratio) spec_heigth = self.spec_size // self.freq_ratio if time_length > spec_width or freq_length > spec_heigth: raise ValueError('the wav size should be less than or equal to the swin input size') if time_length < spec_width: normalized_input_features = nn.functional.interpolate(normalized_input_features, (spec_width, freq_length), mode='bicubic', align_corners=True) if freq_length < spec_heigth: normalized_input_features = nn.functional.interpolate(normalized_input_features, (time_length, spec_heigth), mode='bicubic', align_corners=True) (batch, channels, time, freq) = normalized_input_features.shape normalized_input_features = normalized_input_features.reshape(batch, channels * self.freq_ratio, time // self.freq_ratio, freq) normalized_input_features = normalized_input_features.permute(0, 1, 3, 2).contiguous() normalized_input_features = normalized_input_features.reshape(batch, channels, freq * self.freq_ratio, time // self.freq_ratio) return normalized_input_features def forward(self, input_features, is_longer: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, always_partition: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple, ClapAudioModelOutput]: input_features = input_features.transpose(1, 3) normalized_input_features = self.batch_norm(input_features) normalized_input_features = normalized_input_features.transpose(1, 3) is_longer_list_idx = None if self.enable_fusion: is_longer_list = is_longer.to(input_features.device) is_longer_list_idx = torch.where(is_longer_list == 1)[0] hidden_states = self.reshape_mel2img(normalized_input_features) frames_num = hidden_states.shape[2] hidden_states = self.patch_embed(hidden_states, is_longer_list_idx) all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None input_dimensions = self.input_resolutions[0] if output_hidden_states: (batch_size, _, hidden_size) = hidden_states.shape reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for (i, layer_module) in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None input_dimensions = self.input_resolutions[i] if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, input_dimensions, layer_head_mask, output_attentions) else: layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] output_dimensions = layer_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: (batch_size, _, hidden_size) = hidden_states_before_downsampling.shape reshaped_hidden_state = hidden_states_before_downsampling.view(batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size) reshaped_hidden_state = 
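# Shape walk-through for reshape_mel2img above, using what are assumed to be the typical
# CLAP audio settings (spec_size=256, num_mel_bins=64, hence freq_ratio = 256 // 64 = 4,
# and mel inputs of shape (batch, 1, 1001, 64) from the feature extractor). These numbers
# are illustrative; the recipe is the same for any config.
import torch

spec_size, freq_ratio = 256, 4
batch, channels, time_length, freq_length = 2, 1, 1001, 64
features = torch.randn(batch, channels, time_length, freq_length)

spec_width = spec_size * freq_ratio    # 1024: target length of the time axis
spec_height = spec_size // freq_ratio  # 64:   target length of the mel axis
features = torch.nn.functional.interpolate(
    features, (spec_width, freq_length), mode="bicubic", align_corners=True
)                                      # (2, 1, 1024, 64): time axis stretched to 1024

# Fold the long time axis into the channel axis, then unfold it back onto the frequency
# axis so the result is a square spec_size x spec_size "image" for the Swin encoder.
features = features.reshape(batch, channels * freq_ratio, spec_width // freq_ratio, freq_length)
features = features.permute(0, 1, 3, 2).contiguous()
features = features.reshape(batch, channels, freq_length * freq_ratio, spec_width // freq_ratio)
print(features.shape)  # torch.Size([2, 1, 256, 256]) -> ready for ClapAudioPatchEmbed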
reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and (not output_hidden_states_before_downsampling): (batch_size, _, hidden_size) = hidden_states.shape reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[3:] last_hidden_state = self.norm(hidden_states) (batch_size, _, n_channels) = last_hidden_state.shape freq_shape = frames_num // 2 ** (len(self.depths) - 1) // self.patch_stride[0] temporal_shape = frames_num // 2 ** (len(self.depths) - 1) // self.patch_stride[1] last_hidden_state = last_hidden_state.permute(0, 2, 1).contiguous().reshape(batch_size, n_channels, freq_shape, temporal_shape) (batch_size, n_channels, n_frequencies, n_temp) = last_hidden_state.shape c_freq_bin = n_frequencies // self.freq_ratio last_hidden_state = last_hidden_state.reshape(batch_size, n_channels, n_frequencies // c_freq_bin, c_freq_bin, n_temp) last_hidden_state = last_hidden_state.permute(0, 1, 3, 2, 4).contiguous().reshape(batch_size, n_channels, c_freq_bin, -1) latent_output = self.avgpool(torch.flatten(last_hidden_state, 2)) latent_output = torch.flatten(latent_output, 1) if not return_dict: return tuple((v for v in [last_hidden_state, latent_output, all_reshaped_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=latent_output, hidden_states=all_reshaped_hidden_states, attentions=all_self_attentions) CLAP_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`ClapConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' CLAP_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' CLAP_AUDIO_INPUTS_DOCSTRING = '\n Args:\n input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Input audio features. This should be returnes by the [`ClapFeatureExtractor`] class that you can also\n retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details.\n is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*):\n Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance\n the features.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' CLAP_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Input audio features. This should be returnes by the [`ClapFeatureExtractor`] class that you can also\n retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details.\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class ClapProjectionLayer(nn.Module): def __init__(self, config: Union[ClapAudioConfig, ClapTextConfig]): super().__init__() self.config = config hidden_size = config.hidden_size projection_dim = config.projection_dim self.linear1 = nn.Linear(hidden_size, projection_dim) self.activation = ACT2FN[config.projection_hidden_act] self.linear2 = nn.Linear(projection_dim, projection_dim) def forward(self, hidden_states): hidden_states = self.linear1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.linear2(hidden_states) return hidden_states class ClapTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=True) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=True) self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device) return position_ids.unsqueeze(0).expand(input_shape) class ClapTextSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 
'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = 
torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class ClapTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states CLAP_TEXT_SELF_ATTENTION_CLASSES = {'eager': ClapTextSelfAttention} class ClapTextAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = CLAP_TEXT_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = ClapTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class ClapTextIntermediate(nn.Module): def __init__(self, config): 
super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class ClapTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class ClapTextLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ClapTextAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = ClapTextAttention(config, position_embedding_type='absolute') self.intermediate = ClapTextIntermediate(config) self.output = ClapTextOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = 
self.output(intermediate_output, attention_output) return layer_output class ClapTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ClapTextLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class ClapTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class ClapPreTrainedModel(PreTrainedModel): config_class = ClapConfig base_model_prefix = 'clap' supports_gradient_checkpointing = False def _init_weights(self, module): factor = self.config.initializer_factor if isinstance(module, ClapTextEmbeddings): module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02) module.token_type_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, 
ClapModel): nn.init.normal_(module.logit_scale_a, std=factor * 0.02) nn.init.normal_(module.logit_scale_t, std=factor * 0.02) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, (nn.Conv2d, nn.Linear)): in_proj_std = self.config.hidden_size ** (-0.5) * (2 * self.config.num_hidden_layers) ** (-0.5) * factor nn.init.normal_(module.weight, std=in_proj_std) if module.bias is not None: module.bias.data.zero_() class ClapAudioModel(ClapPreTrainedModel): config_class = ClapAudioConfig main_input_name = 'input_features' def __init__(self, config: ClapAudioConfig): super().__init__(config) self.audio_encoder = ClapAudioEncoder(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.audio_encoder.patch_embed.proj @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ClapAudioConfig) def forward(self, input_features: Optional[torch.FloatTensor]=None, is_longer: Optional[torch.BoolTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return self.audio_encoder(input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class ClapTextModel(ClapPreTrainedModel): config_class = ClapTextConfig def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = ClapTextEmbeddings(config) self.encoder = ClapTextEncoder(config) self.pooler = ClapTextPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the 
same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings(CLAP_START_DOCSTRING) class ClapModel(ClapPreTrainedModel): config_class = ClapConfig def __init__(self, config: ClapConfig): super().__init__(config) if not isinstance(config.text_config, ClapTextConfig): raise TypeError(f'config.text_config is expected to be of type ClapTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.audio_config, ClapAudioConfig): raise TypeError(f'config.audio_config is expected to be of type ClapAudioConfig but is of type {type(config.audio_config)}.') text_config = config.text_config audio_config = config.audio_config self.logit_scale_a = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value))) self.logit_scale_t = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value))) self.projection_dim = config.projection_dim self.text_model = ClapTextModel(text_config) 
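# Minimal end-to-end sketch of how ClapModel's text and audio towers are used for
# audio-text similarity once assembled. The checkpoint id "laion/clap-htsat-unfused" and
# the 48 kHz sampling rate are assumptions for the example, not taken from this file.
import numpy as np
import torch
from transformers import ClapModel, ClapProcessor

def clap_similarity_demo():
    model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

    audio = np.random.randn(48_000).astype(np.float32)  # one second of placeholder audio
    texts = ["a dog barking", "a violin playing"]

    inputs = processor(text=texts, audios=audio, sampling_rate=48_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        outputs = model(**inputs)

    # logits_per_audio has shape (n_audio, n_text): scaled cosine similarities.
    return outputs.logits_per_audio.softmax(dim=-1)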
self.text_projection = ClapProjectionLayer(text_config) self.audio_model = ClapAudioModel(audio_config) self.audio_projection = ClapProjectionLayer(audio_config) self.post_init() @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING) def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = text_outputs[1] if return_dict is not None else text_outputs.pooler_output text_features = self.text_projection(pooled_output) text_features = F.normalize(text_features, dim=-1) return text_features @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING) def get_audio_features(self, input_features: Optional[torch.Tensor]=None, is_longer: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict audio_outputs = self.audio_model(input_features=input_features, is_longer=is_longer, return_dict=return_dict) pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output audio_features = self.audio_projection(pooled_output) audio_features = F.normalize(audio_features, dim=-1) return audio_features @add_start_docstrings_to_model_forward(CLAP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ClapOutput, config_class=ClapConfig) def forward(self, input_ids: Optional[torch.LongTensor]=None, input_features: Optional[torch.FloatTensor]=None, is_longer: Optional[torch.BoolTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, ClapOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict audio_outputs = self.audio_model(input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, 
return_dict=return_dict) audio_embeds = audio_outputs[1] if not return_dict else audio_outputs.pooler_output audio_embeds = self.audio_projection(audio_embeds) text_embeds = text_outputs[1] if not return_dict else text_outputs.pooler_output text_embeds = self.text_projection(text_embeds) audio_embeds = audio_embeds / audio_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) logit_scale_text = self.logit_scale_t.exp() logit_scale_audio = self.logit_scale_a.exp() logits_per_text = torch.matmul(text_embeds, audio_embeds.t()) * logit_scale_text logits_per_audio = torch.matmul(audio_embeds, text_embeds.t()) * logit_scale_audio loss = None if return_loss: caption_loss = contrastive_loss(logits_per_text) audio_loss = contrastive_loss(logits_per_audio.t()) loss = (caption_loss + audio_loss) / 2.0 if not return_dict: output = (logits_per_audio, logits_per_text, text_embeds, audio_embeds, text_outputs, audio_outputs) return (loss,) + output if loss is not None else output return ClapOutput(loss=loss, logits_per_audio=logits_per_audio, logits_per_text=logits_per_text, text_embeds=text_embeds, audio_embeds=audio_embeds, text_model_output=text_outputs, audio_model_output=audio_outputs) @add_start_docstrings('\n CLAP Text Model with a projection layer on top (a linear layer on top of the pooled output).\n ', CLAP_START_DOCSTRING) class ClapTextModelWithProjection(ClapPreTrainedModel): config_class = ClapTextConfig def __init__(self, config: ClapTextConfig): super().__init__(config) self.text_model = ClapTextModel(config) self.text_projection = ClapProjectionLayer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.word_embeddings def set_input_embeddings(self, value): self.text_model.embeddings.word_embeddings = value @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ClapTextModelOutput, config_class=ClapTextConfig) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, ClapTextModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = text_outputs[1] if not return_dict else text_outputs.pooler_output text_embeds = self.text_projection(pooled_output) if not return_dict: outputs = (text_embeds, text_outputs[0]) + text_outputs[2:] return tuple((output for output in outputs if output is not None)) return ClapTextModelOutput(text_embeds=text_embeds, last_hidden_state=text_outputs.last_hidden_state, hidden_states=text_outputs.hidden_states, attentions=text_outputs.attentions) @add_start_docstrings('\n CLAP Audio Model with a projection layer on top (a linear layer on top of the pooled output).\n ', CLAP_START_DOCSTRING) class ClapAudioModelWithProjection(ClapPreTrainedModel): config_class = ClapAudioConfig main_input_name = 'input_features' def __init__(self, config: ClapAudioConfig): super().__init__(config) self.audio_model = ClapAudioModel(config) self.audio_projection = ClapProjectionLayer(config) self.post_init() def get_input_embeddings(self) -> 
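# Sketch of the symmetric contrastive objective invoked in ClapModel.forward above.
# `contrastive_loss` itself is defined earlier in this file; the stand-in below uses the
# usual CLIP-style definition (cross-entropy against the diagonal) purely to make the
# `(caption_loss + audio_loss) / 2.0` step concrete on toy data.
import torch
from torch import nn

def clip_style_contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    # Row i of `logits` should score highest at column i (the matching text/audio pair).
    labels = torch.arange(len(logits), device=logits.device)
    return nn.functional.cross_entropy(logits, labels)

logits_per_text = torch.randn(4, 4)  # toy (n_text, n_audio) similarity matrix
caption_loss = clip_style_contrastive_loss(logits_per_text)
audio_loss = clip_style_contrastive_loss(logits_per_text.t())
loss = (caption_loss + audio_loss) / 2.0
print(loss.item())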
nn.Module: return self.audio_model.audio_encoder.patch_embed.proj @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ClapAudioModelOutput, config_class=ClapAudioConfig) def forward(self, input_features: Optional[torch.FloatTensor]=None, is_longer: Optional[torch.BoolTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, ClapAudioModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states audio_outputs = self.audio_model(input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output audio_embeds = self.audio_projection(pooled_output) if not return_dict: outputs = (audio_embeds, audio_outputs[0]) + audio_outputs[2:] return tuple((output for output in outputs if output is not None)) return ClapAudioModelOutput(audio_embeds=audio_embeds, last_hidden_state=audio_outputs.last_hidden_state, attentions=audio_outputs.attentions, hidden_states=audio_outputs.hidden_states) # File: transformers-main/src/transformers/models/clap/processing_clap.py """""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class ClapProcessor(ProcessorMixin): feature_extractor_class = 'ClapFeatureExtractor' tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__(self, text=None, audios=None, return_tensors=None, **kwargs): sampling_rate = kwargs.pop('sampling_rate', None) if text is None and audios is None: raise ValueError('You have to specify either text or audios. 
Both cannot be none.') if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if audios is not None: audio_features = self.feature_extractor(audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs) if text is not None and audios is not None: encoding.update(audio_features) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names)) # File: transformers-main/src/transformers/models/clip/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available _import_structure = {'configuration_clip': ['CLIPConfig', 'CLIPOnnxConfig', 'CLIPTextConfig', 'CLIPVisionConfig'], 'processing_clip': ['CLIPProcessor'], 'tokenization_clip': ['CLIPTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_clip_fast'] = ['CLIPTokenizerFast'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['feature_extraction_clip'] = ['CLIPFeatureExtractor'] _import_structure['image_processing_clip'] = ['CLIPImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_clip'] = ['CLIPModel', 'CLIPPreTrainedModel', 'CLIPTextModel', 'CLIPTextModelWithProjection', 'CLIPVisionModel', 'CLIPVisionModelWithProjection', 'CLIPForImageClassification'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_clip'] = ['TFCLIPModel', 'TFCLIPPreTrainedModel', 'TFCLIPTextModel', 'TFCLIPVisionModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_clip'] = ['FlaxCLIPModel', 'FlaxCLIPPreTrainedModel', 'FlaxCLIPTextModel', 'FlaxCLIPTextPreTrainedModel', 'FlaxCLIPTextModelWithProjection', 'FlaxCLIPVisionModel', 'FlaxCLIPVisionPreTrainedModel'] if TYPE_CHECKING: from .configuration_clip import CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import CLIPForImageClassification, CLIPModel, 
CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextModelWithProjection, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/clip/configuration_clip.py """""" import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class CLIPTextConfig(PretrainedConfig): model_type = 'clip_text_model' def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, projection_dim=512, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=77, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=49406, eos_token_id=49407, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'clip': config_dict = config_dict['text_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class CLIPVisionConfig(PretrainedConfig): model_type = 'clip_vision_model' def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'clip': config_dict = config_dict['vision_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class CLIPConfig(PretrainedConfig): model_type = 'clip' def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs): text_config_dict = kwargs.pop('text_config_dict', None) vision_config_dict = kwargs.pop('vision_config_dict', None) super().__init__(**kwargs) if text_config_dict is not None: if text_config is None: text_config = {} _text_config_dict = CLIPTextConfig(**text_config_dict).to_dict() for (key, value) in _text_config_dict.items(): if key in text_config and value != text_config[key] and (key not in ['transformers_version']): if key in text_config_dict: message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["{key}"]` will be used instead.' else: message = f'`text_config_dict` is provided which will be used to initialize `CLIPTextConfig`. The value `text_config["{key}"]` will be overridden.' logger.info(message) text_config.update(_text_config_dict) if vision_config_dict is not None: if vision_config is None: vision_config = {} _vision_config_dict = CLIPVisionConfig(**vision_config_dict).to_dict() if 'id2label' in _vision_config_dict: _vision_config_dict['id2label'] = {str(key): value for (key, value) in _vision_config_dict['id2label'].items()} for (key, value) in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and (key not in ['transformers_version']): if key in vision_config_dict: message = f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different values. The value `vision_config_dict["{key}"]` will be used instead.' else: message = f'`vision_config_dict` is provided which will be used to initialize `CLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.' 
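# As in the text branch above, conflicting keys are resolved in favour of
# `vision_config_dict`: the message assembled here only logs which value will
# be kept before `vision_config` is updated with `_vision_config_dict` below.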
logger.info(message) vision_config.update(_vision_config_dict) if text_config is None: text_config = {} logger.info('`text_config` is `None`. Initializing the `CLIPTextConfig` with default values.') if vision_config is None: vision_config = {} logger.info('`vision_config` is `None`. initializing the `CLIPVisionConfig` with default values.') self.text_config = CLIPTextConfig(**text_config) self.vision_config = CLIPVisionConfig(**vision_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = 1.0 @classmethod def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs): return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) class CLIPOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('attention_mask', {0: 'batch', 1: 'sequence'})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('logits_per_image', {0: 'batch'}), ('logits_per_text', {0: 'batch'}), ('text_embeds', {0: 'batch'}), ('image_embeds', {0: 'batch'})]) @property def atol_for_validation(self) -> float: return 0.0001 def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=-1, seq_length: int=-1, framework: Optional['TensorType']=None) -> Mapping[str, Any]: text_input_dict = super().generate_dummy_inputs(processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework) image_input_dict = super().generate_dummy_inputs(processor.image_processor, batch_size=batch_size, framework=framework) return {**text_input_dict, **image_input_dict} @property def default_onnx_opset(self) -> int: return 14 # File: transformers-main/src/transformers/models/clip/convert_clip_original_pytorch_to_hf.py import argparse import torch from clip import load from transformers import CLIPConfig, CLIPModel def copy_attn_layer(hf_attn_layer, pt_attn_layer): (q_proj, k_proj, v_proj) = pt_attn_layer.in_proj_weight.chunk(3, dim=0) (q_proj_bias, k_proj_bias, v_proj_bias) = pt_attn_layer.in_proj_bias.chunk(3, dim=0) out_proj_weights = pt_attn_layer.out_proj.weight out_proj_bias = pt_attn_layer.out_proj.bias hf_attn_layer.q_proj.weight.data = q_proj hf_attn_layer.q_proj.bias.data = q_proj_bias hf_attn_layer.k_proj.weight.data = k_proj hf_attn_layer.k_proj.bias.data = k_proj_bias hf_attn_layer.v_proj.weight.data = v_proj hf_attn_layer.v_proj.bias.data = v_proj_bias hf_attn_layer.out_proj.weight = out_proj_weights hf_attn_layer.out_proj.bias = out_proj_bias def copy_mlp(hf_mlp, pt_mlp): copy_linear(hf_mlp.fc1, pt_mlp.c_fc) copy_linear(hf_mlp.fc2, pt_mlp.c_proj) def copy_linear(hf_linear, pt_linear): hf_linear.weight = pt_linear.weight hf_linear.bias = pt_linear.bias def copy_layer(hf_layer, pt_layer): copy_linear(hf_layer.layer_norm1, pt_layer.ln_1) copy_linear(hf_layer.layer_norm2, pt_layer.ln_2) copy_mlp(hf_layer.mlp, pt_layer.mlp) copy_attn_layer(hf_layer.self_attn, pt_layer.attn) def copy_layers(hf_layers, pt_layers): for (hf_layer, pt_layer) in zip(hf_layers, pt_layers): copy_layer(hf_layer, pt_layer) def copy_encoder(hf_encoder, pt_model): hf_encoder.embeddings.token_embedding.weight = pt_model.token_embedding.weight hf_encoder.embeddings.position_embedding.weight.data = pt_model.positional_embedding copy_linear(hf_encoder.final_layer_norm, pt_model.ln_final) 
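# The encoder weights are copied block by block: each OpenAI CLIP `resblock`
# maps onto one Hugging Face `CLIPEncoderLayer` (see `copy_layer` above, which
# handles the two layer norms, the MLP and the attention projections).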
copy_layers(hf_encoder.encoder.layers, pt_model.transformer.resblocks) def copy_text_model_and_projection(hf_model, pt_model): hf_model.text_projection.weight.data = pt_model.text_projection.data.T.contiguous() copy_encoder(hf_model.text_model, pt_model) def copy_vison_model_and_projection(hf_model, pt_model): hf_model.visual_projection.weight.data = pt_model.visual.proj.data.T.contiguous() copy_linear(hf_model.vision_model.pre_layrnorm, pt_model.visual.ln_pre) copy_linear(hf_model.vision_model.post_layernorm, pt_model.visual.ln_post) hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_model.visual.conv1.weight.data hf_model.vision_model.embeddings.class_embedding = pt_model.visual.class_embedding hf_model.vision_model.embeddings.position_embedding.weight.data = pt_model.visual.positional_embedding.data copy_layers(hf_model.vision_model.encoder.layers, pt_model.visual.transformer.resblocks) @torch.no_grad() def convert_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None): if config_path is not None: config = CLIPConfig.from_pretrained(config_path) else: config = CLIPConfig(projection_dim=512, text_config={}, vision_config={}) hf_model = CLIPModel(config).eval() (pt_model, _) = load(checkpoint_path, device='cpu', jit=False) pt_model = pt_model.eval() copy_text_model_and_projection(hf_model, pt_model) copy_vison_model_and_projection(hf_model, pt_model) hf_model.logit_scale = pt_model.logit_scale input_ids = torch.tensor([[config.text_config.bos_token_id] + list(range(3, 77)) + [config.text_config.eos_token_id] + [config.text_config.pad_token_id]]) pixel_values = torch.randn(1, 3, 224, 224) hf_outputs = hf_model(input_ids=input_ids, pixel_values=pixel_values, return_dict=True) hf_logits_per_image = hf_outputs.logits_per_image hf_logits_per_text = hf_outputs.logits_per_text (pt_logits_per_image, pt_logits_per_text) = pt_model(pixel_values, input_ids) assert torch.allclose(hf_logits_per_image, pt_logits_per_image, atol=0.001) assert torch.allclose(hf_logits_per_text, pt_logits_per_text, atol=0.001) hf_model.save_pretrained(pytorch_dump_folder_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') args = parser.parse_args() convert_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) # File: transformers-main/src/transformers/models/clip/feature_extraction_clip.py """""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor logger = logging.get_logger(__name__) class CLIPFeatureExtractor(CLIPImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. 
Please use CLIPImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs) # File: transformers-main/src/transformers/models/clip/image_processing_clip.py """""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, get_resize_output_image_size, resize, to_channel_dimension_format from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments from ...utils import TensorType, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): import PIL class CLIPImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Dict[str, int]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224} crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size') self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb self._valid_processor_keys = ['images', 'do_resize', 'size', 'resample', 'do_center_crop', 'crop_size', 'do_rescale', 'rescale_factor', 'do_normalize', 'image_mean', 'image_std', 'do_convert_rgb', 'return_tensors', 'data_format', 'input_data_format'] if 'use_square_size' in kwargs and kwargs['use_square_size']: self.size = {'height': size['shortest_edge'], 'width': size['shortest_edge']} delattr(self, 'use_square_size') def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: default_to_square = True if 'shortest_edge' in size: size = size['shortest_edge'] default_to_square = False elif 'height' in size and 'width' in size: size = (size['height'], size['width']) else: raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: int=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: 
bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, param_name='size', default_to_square=False) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) all_images = [] for image in images: if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) all_images.append(image) images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors) # File: transformers-main/src/transformers/models/clip/modeling_clip.py """""" from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import is_torch_greater_or_equal_than_2_2 from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'CLIPConfig' _CHECKPOINT_FOR_DOC = 'openai/clip-vit-base-patch32' _IMAGE_CLASS_CHECKPOINT = 'openai/clip-vit-base-patch32' _IMAGE_CLASS_EXPECTED_OUTPUT = 'LABEL_0' def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 def _get_vector_norm(tensor: torch.Tensor) -> torch.Tensor: square_tensor = torch.pow(tensor, 2) sum_tensor = torch.sum(square_tensor, dim=-1, keepdim=True) normed_tensor = torch.pow(sum_tensor, 0.5) return normed_tensor @dataclass class CLIPVisionModelOutput(ModelOutput): image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class CLIPTextModelOutput(ModelOutput): text_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class CLIPOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: 
BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) class CLIPVisionEmbeddings(nn.Module): def __init__(self, config: CLIPVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class CLIPTextEmbeddings(nn.Module): def __init__(self, config: CLIPTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class CLIPAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: (bsz, tgt_len, embed_dim) 
= hidden_states.size() query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped) class CLIPFlashAttention2(CLIPAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: output_attentions = False (batch_size, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim) key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim) value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim) dropout_rate = self.dropout if self.training else 0.0 input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = 
self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, is_causal=causal_attention_mask is not None, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim).contiguous() attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights) class CLIPSdpaAttention(CLIPAttention): def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if output_attentions: logger.warning_once('CLIPModel is using CLIPSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions) if attention_mask is not None and causal_attention_mask is not None: attn_mask = attention_mask + causal_attention_mask elif causal_attention_mask is not None: attn_mask = causal_attention_mask else: attn_mask = attention_mask (bsz, tgt_len, embed_dim) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) if not is_torch_greater_or_equal_than_2_2 and query_states.device.type == 'cuda' and (attn_mask is not None): query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attn_mask, dropout_p=self.dropout if self.training else 0.0, scale=self.scale) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, None) CLIP_ATTENTION_CLASSES = {'eager': CLIPAttention, 'sdpa': CLIPSdpaAttention, 'flash_attention_2': CLIPFlashAttention2} class CLIPMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class 
CLIPEncoderLayer(nn.Module): def __init__(self, config: CLIPConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = CLIP_ATTENTION_CLASSES[config._attn_implementation](config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = CLIPMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class CLIPPreTrainedModel(PreTrainedModel): config_class = CLIPConfig base_model_prefix = 'clip' supports_gradient_checkpointing = True _supports_sdpa = True _supports_flash_attn_2 = True def _init_weights(self, module): factor = self.config.initializer_factor if isinstance(module, CLIPTextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, CLIPVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim ** (-0.5) * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, CLIPAttention): factor = self.config.initializer_factor in_proj_std = module.embed_dim ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor out_proj_std = module.embed_dim ** (-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, CLIPMLP): factor = self.config.initializer_factor in_proj_std = module.config.hidden_size ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor fc_std = (2 * module.config.hidden_size) ** (-0.5) * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, CLIPModel): nn.init.normal_(module.text_projection.weight, std=module.text_embed_dim ** (-0.5) * self.config.initializer_factor) nn.init.normal_(module.visual_projection.weight, std=module.vision_embed_dim ** (-0.5) * self.config.initializer_factor) elif isinstance(module, CLIPVisionModelWithProjection): nn.init.normal_(module.visual_projection.weight, std=self.config.hidden_size ** (-0.5) * self.config.initializer_factor) elif isinstance(module, CLIPTextModelWithProjection): nn.init.normal_(module.text_projection.weight, std=self.config.hidden_size ** (-0.5) * self.config.initializer_factor) elif isinstance(module, CLIPForImageClassification): nn.init.normal_(module.classifier.weight, std=self.config.vision_config.hidden_size ** (-0.5) * self.config.initializer_factor) if isinstance(module, 
nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() CLIP_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' CLIP_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' CLIP_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' CLIP_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class CLIPEncoder(nn.Module): def __init__(self, config: CLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class CLIPTextTransformer(nn.Module): def __init__(self, config: CLIPTextConfig): super().__init__() self.config = config embed_dim = 
config.hidden_size self.embeddings = CLIPTextEmbeddings(config) self.encoder = CLIPEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.eos_token_id = config.eos_token_id self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError('You have to specify input_ids') input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) causal_attention_mask = _create_4d_causal_attention_mask(input_shape, hidden_states.dtype, device=hidden_states.device) if attention_mask is not None and (not self._use_flash_attention_2): attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) if self.eos_token_id == 2: pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1)] else: pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id).int().argmax(dim=-1)] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('The text model from CLIP without any head or projection on top.', CLIP_START_DOCSTRING) class CLIPTextModel(CLIPPreTrainedModel): config_class = CLIPTextConfig _no_split_modules = ['CLIPTextEmbeddings', 'CLIPEncoderLayer'] def __init__(self, config: CLIPTextConfig): super().__init__(config) self.text_model = CLIPTextTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, 
output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class CLIPVisionTransformer(nn.Module): def __init__(self, config: CLIPVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = CLIPVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = CLIPEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('The vision model from CLIP without any head or projection on top.', CLIP_START_DOCSTRING) class CLIPVisionModel(CLIPPreTrainedModel): config_class = CLIPVisionConfig main_input_name = 'pixel_values' _no_split_modules = ['CLIPEncoderLayer'] def __init__(self, config: CLIPVisionConfig): super().__init__(config) self.vision_model = CLIPVisionTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) @add_start_docstrings(CLIP_START_DOCSTRING) class CLIPModel(CLIPPreTrainedModel): config_class = CLIPConfig _no_split_modules = ['CLIPTextEmbeddings', 'CLIPEncoderLayer', 
'CLIPVisionEmbeddings'] def __init__(self, config: CLIPConfig): super().__init__(config) if not isinstance(config.text_config, CLIPTextConfig): raise TypeError(f'config.text_config is expected to be of type CLIPTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.vision_config, CLIPVisionConfig): raise TypeError(f'config.vision_config is expected to be of type CLIPVisionConfig but is of type {type(config.vision_config)}.') text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size text_model = CLIPTextModel._from_config(text_config, attn_implementation=config._attn_implementation) self.text_model = text_model.text_model vision_model = CLIPVisionModel._from_config(vision_config, attn_implementation=config._attn_implementation) self.vision_model = vision_model.vision_model self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.post_init() @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = vision_outputs[1] image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CLIPOutput, config_class=CLIPConfig) def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: 
Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CLIPOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) image_embeds = image_embeds / _get_vector_norm(image_embeds) text_embeds = text_embeds / _get_vector_norm(text_embeds) logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t().to(text_embeds.device)) * logit_scale.to(text_embeds.device) logits_per_image = logits_per_text.t() loss = None if return_loss: loss = clip_loss(logits_per_text) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return (loss,) + output if loss is not None else output return CLIPOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) @add_start_docstrings('\n CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output).\n ', CLIP_START_DOCSTRING) class CLIPTextModelWithProjection(CLIPPreTrainedModel): config_class = CLIPTextConfig _no_split_modules = ['CLIPTextEmbeddings', 'CLIPEncoderLayer'] def __init__(self, config: CLIPTextConfig): super().__init__(config) text_model = CLIPTextModel._from_config(config, attn_implementation=config._attn_implementation) self.text_model = text_model.text_model self.text_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CLIPTextModelOutput, config_class=CLIPTextConfig) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CLIPTextModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = text_outputs[1] text_embeds = self.text_projection(pooled_output) if not return_dict: outputs = (text_embeds, text_outputs[0]) + text_outputs[2:] return tuple((output for output in outputs if output is not None)) return CLIPTextModelOutput(text_embeds=text_embeds, 
last_hidden_state=text_outputs.last_hidden_state, hidden_states=text_outputs.hidden_states, attentions=text_outputs.attentions) @add_start_docstrings('\n CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output).\n ', CLIP_START_DOCSTRING) class CLIPVisionModelWithProjection(CLIPPreTrainedModel): config_class = CLIPVisionConfig main_input_name = 'pixel_values' def __init__(self, config: CLIPVisionConfig): super().__init__(config) vision_model = CLIPVisionModel._from_config(config, attn_implementation=config._attn_implementation) self.vision_model = vision_model.vision_model self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CLIPVisionModelOutput, config_class=CLIPVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CLIPVisionModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = vision_outputs[1] image_embeds = self.visual_projection(pooled_output) if not return_dict: outputs = (image_embeds, vision_outputs[0]) + vision_outputs[2:] return tuple((output for output in outputs if output is not None)) return CLIPVisionModelOutput(image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions) @add_start_docstrings('\n CLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of\n the patch tokens) e.g. 
for ImageNet.\n ', CLIP_START_DOCSTRING) class CLIPForImageClassification(CLIPPreTrainedModel): main_input_name = 'pixel_values' def __init__(self, config: CLIPConfig) -> None: super().__init__(config) self.num_labels = config.num_labels vision_model = CLIPVisionModel._from_config(config.vision_config, attn_implementation=config._attn_implementation) self.vision_model = vision_model.vision_model self.classifier = nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.vision_model(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = torch.mean(sequence_output[:, 1:, :], dim=1) logits = self.classifier(sequence_output) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/clip/modeling_flax_clip.py from typing import Any, Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPooling from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring from ...utils import ModelOutput, add_start_docstrings, logging from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig logger = logging.get_logger(__name__) 
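# Illustrative usage sketch: a minimal example of zero-shot image/text matching with the PyTorch
# `CLIPModel` defined above in modeling_clip.py (the same pattern applies to the projection and
# image-classification heads). The helper name `_clip_usage_sketch` is purely illustrative, and the
# checkpoint name and image URL simply mirror the docstring examples used later in this file.
def _clip_usage_sketch():
    import requests
    import torch
    from PIL import Image
    from transformers import AutoProcessor, CLIPModel

    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
    )
    with torch.no_grad():
        outputs = model(**inputs)
    # `logits_per_image` holds the image-text similarity scores; softmax turns them into
    # probabilities over the candidate captions.
    return outputs.logits_per_image.softmax(dim=1)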
CLIP_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its models (such as downloading, saving and converting weights from PyTorch models).\n\n This model is also a\n [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as\n a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and\n behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified, all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' CLIP_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' CLIP_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' CLIP_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @flax.struct.dataclass class FlaxCLIPTextModelOutput(ModelOutput): text_embeds: jnp.ndarray = None last_hidden_state: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray, ...]] = None attentions: Optional[Tuple[jnp.ndarray, ...]] = None @flax.struct.dataclass class FlaxCLIPOutput(ModelOutput): logits_per_image: jnp.ndarray = None logits_per_text: jnp.ndarray = None text_embeds: jnp.ndarray = None image_embeds: jnp.ndarray = None text_model_output: FlaxBaseModelOutputWithPooling = None vision_model_output: FlaxBaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) class FlaxCLIPVisionEmbeddings(nn.Module): config: CLIPVisionConfig dtype: jnp.dtype = jnp.float32 def setup(self): embed_dim = self.config.hidden_size image_size = self.config.image_size patch_size = self.config.patch_size self.class_embedding = self.param('class_embedding', jax.nn.initializers.normal(stddev=0.02), (embed_dim,)) self.patch_embedding = nn.Conv(embed_dim, kernel_size=(patch_size, patch_size), strides=(patch_size, patch_size), padding='VALID', use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal()) self.num_patches = (image_size // patch_size) ** 2 num_positions = self.num_patches + 1 self.position_embedding = nn.Embed(num_positions, embed_dim, embedding_init=jax.nn.initializers.normal()) self.position_ids = jnp.expand_dims(jnp.arange(0, num_positions, dtype='i4'), axis=0) def __call__(self, pixel_values): patch_embeds = self.patch_embedding(pixel_values) (batch_size, height, width, channels) = patch_embeds.shape patch_embeds = jnp.reshape(patch_embeds, (batch_size, height * width, channels)) class_embeds = jnp.expand_dims(self.class_embedding, axis=(0, 1)) class_embeds = jnp.tile(class_embeds, (batch_size, 1, 1)) embeddings = jnp.concatenate([class_embeds, patch_embeds], axis=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class FlaxCLIPTextEmbeddings(nn.Module): config: CLIPTextConfig dtype: jnp.dtype = jnp.float32 def setup(self): embed_dim = self.config.hidden_size self.token_embedding = nn.Embed(self.config.vocab_size, embed_dim, embedding_init=jax.nn.initializers.normal()) self.position_embedding = nn.Embed(self.config.max_position_embeddings, embed_dim, embedding_init=jax.nn.initializers.normal()) self.position_ids = jnp.expand_dims(jnp.arange(0, self.config.max_position_embeddings, dtype='i4'), axis=(0, 1)) def __call__(self, input_ids, position_ids): input_embeds = self.token_embedding(input_ids.astype('i4')) position_embeds = self.position_embedding(position_ids.astype('i4')) embeddings = input_embeds + position_embeds return embeddings class FlaxCLIPAttention(nn.Module): config: Union[CLIPTextConfig, CLIPVisionConfig] dtype: jnp.dtype = jnp.float32 def setup(self): self.embed_dim = self.config.hidden_size self.num_heads = self.config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = self.config.attention_dropout self.k_proj = nn.Dense(self.embed_dim, 
dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01)) self.v_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01)) self.q_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01)) self.out_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01)) self.causal = isinstance(self.config, CLIPTextConfig) if self.causal: self.causal_mask = make_causal_mask(jnp.ones((1, self.config.max_position_embeddings), dtype='i4')) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) def __call__(self, hidden_states, attention_mask=None, deterministic: bool=True, output_attentions: bool=False): query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query) key = self._split_heads(key) value = self._split_heads(value) causal_attention_mask = None if self.causal: (query_length, key_length) = (query.shape[1], key.shape[1]) causal_attention_mask = self.causal_mask[:, :, key_length - query_length:key_length, :key_length] if attention_mask is not None and causal_attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) attention_mask = combine_masks(attention_mask, causal_attention_mask, dtype='i4') elif causal_attention_mask is not None: attention_mask = causal_attention_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) if attention_mask is not None: attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng('dropout') attn_weights = dot_product_attention_weights(query, key, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, deterministic=deterministic, dtype=self.dtype, precision=None) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxCLIPMLP(nn.Module): config: Union[CLIPTextConfig, CLIPVisionConfig] dtype: jnp.dtype = jnp.float32 def setup(self): self.activation_fn = ACT2FN[self.config.hidden_act] self.fc1 = nn.Dense(self.config.intermediate_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01)) self.fc2 = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01)) def __call__(self, hidden_states): hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class FlaxCLIPEncoderLayer(nn.Module): config: Union[CLIPTextConfig, CLIPVisionConfig] dtype: jnp.dtype = jnp.float32 def setup(self): self.self_attn = FlaxCLIPAttention(self.config, dtype=self.dtype) self.layer_norm1 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.mlp = FlaxCLIPMLP(self.config, dtype=self.dtype) self.layer_norm2 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_mask, 
deterministic: bool=True, output_attentions: bool=False): residual = hidden_states hidden_states = self.layer_norm1(hidden_states) attn_outputs = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, deterministic=deterministic, output_attentions=output_attentions) hidden_states = attn_outputs[0] hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += attn_outputs[1:] return outputs class FlaxCLIPLayerCollection(nn.Module): config: Union[CLIPTextConfig, CLIPVisionConfig] dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxCLIPEncoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] def __call__(self, hidden_states, attention_mask=None, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer(hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states,) if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) class FlaxCLIPEncoder(nn.Module): config: Union[CLIPTextConfig, CLIPVisionConfig] dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = FlaxCLIPLayerCollection(self.config, dtype=self.dtype) def __call__(self, inputs_embeds, attention_mask=None, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return self.layers(hidden_states=inputs_embeds, attention_mask=attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class FlaxCLIPTextTransformer(nn.Module): config: CLIPTextConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embeddings = FlaxCLIPTextEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxCLIPEncoder(self.config, dtype=self.dtype) self.final_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.eos_token_id = self.config.eos_token_id def __call__(self, input_ids, attention_mask, position_ids, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = 
self.final_layer_norm(last_hidden_state) if self.eos_token_id == 2: pooled_output = last_hidden_state[jnp.arange(last_hidden_state.shape[0]), input_ids.argmax(axis=-1)] else: pooled_output = last_hidden_state[jnp.arange(last_hidden_state.shape[0]), (input_ids == self.eos_token_id).argmax(axis=-1)] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return FlaxBaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class FlaxCLIPVisionTransformer(nn.Module): config: CLIPVisionConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embeddings = FlaxCLIPVisionEmbeddings(self.config, dtype=self.dtype) self.pre_layrnorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.encoder = FlaxCLIPEncoder(self.config, dtype=self.dtype) self.post_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, pixel_values=None, deterministic: bool=True, output_attentions=None, output_hidden_states=None, return_dict: bool=True): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return FlaxBaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class FlaxCLIPTextPreTrainedModel(FlaxPreTrainedModel): config_class = CLIPTextConfig module_class: nn.Module = None def __init__(self, config: CLIPTextConfig, input_shape=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) attention_mask = jnp.ones_like(input_ids) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, position_ids)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def __call__(self, input_ids, attention_mask=None, position_ids=None, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: 
Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng return self.module.apply({'params': params or self.params}, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), jnp.array(position_ids, dtype='i4'), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs) class FlaxCLIPVisionPreTrainedModel(FlaxPreTrainedModel): config_class = CLIPVisionConfig main_input_name = 'pixel_values' module_class: nn.Module = None def __init__(self, config: CLIPVisionConfig, input_shape: Optional[Tuple]=None, seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): if input_shape is None: input_shape = (1, config.image_size, config.image_size, 3) module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: pixel_values = jax.random.normal(rng, input_shape) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, pixel_values)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def __call__(self, pixel_values, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng return self.module.apply({'params': params or self.params}, jnp.array(pixel_values, dtype=jnp.float32), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs) class FlaxCLIPPreTrainedModel(FlaxPreTrainedModel): config_class = CLIPConfig module_class: nn.Module = None def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple]=None, seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): if input_shape is None: input_shape = ((1, 1), (1, config.vision_config.image_size, config.vision_config.image_size, 3)) module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = 
jnp.zeros(input_shape[0], dtype='i4') position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape[0]) attention_mask = jnp.ones_like(input_ids) pixel_values = jax.random.normal(rng, input_shape[1]) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_ids, pixel_values, attention_mask, position_ids)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def __call__(self, input_ids, pixel_values, attention_mask=None, position_ids=None, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng return self.module.apply({'params': params or self.params}, jnp.array(input_ids, dtype='i4'), jnp.array(pixel_values, dtype=jnp.float32), jnp.array(attention_mask, dtype='i4'), jnp.array(position_ids, dtype='i4'), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs) def get_text_features(self, input_ids, attention_mask=None, position_ids=None, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train=False): if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng def _get_features(module, input_ids, attention_mask, position_ids, deterministic): text_outputs = module.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, deterministic=deterministic) pooled_output = text_outputs[1] text_features = module.text_projection(pooled_output) return text_features return self.module.apply({'params': params or self.params}, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), jnp.array(position_ids, dtype='i4'), not train, method=_get_features, rngs=rngs) def get_image_features(self, pixel_values, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train=False): pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng def _get_features(module, pixel_values, deterministic): vision_outputs = module.vision_model(pixel_values=pixel_values, deterministic=deterministic) pooled_output = vision_outputs[1] image_features = module.visual_projection(pooled_output) return image_features return self.module.apply({'params': params or self.params}, jnp.array(pixel_values, dtype=jnp.float32), not train, method=_get_features, rngs=rngs) class 
FlaxCLIPTextModule(nn.Module): config: CLIPTextConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.text_model = FlaxCLIPTextTransformer(self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, position_ids, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class FlaxCLIPTextModel(FlaxCLIPTextPreTrainedModel): module_class = FlaxCLIPTextModule FLAX_CLIP_TEXT_MODEL_DOCSTRING = '\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, FlaxCLIPTextModel\n\n >>> model = FlaxCLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")\n >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")\n\n >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np")\n\n >>> outputs = model(**inputs)\n >>> last_hidden_state = outputs.last_hidden_state\n >>> pooler_output = outputs.pooler_output # pooled (EOS token) states\n ```\n' overwrite_call_docstring(FlaxCLIPTextModel, CLIP_TEXT_INPUTS_DOCSTRING + FLAX_CLIP_TEXT_MODEL_DOCSTRING) append_replace_return_docstrings(FlaxCLIPTextModel, output_type=FlaxBaseModelOutputWithPooling, config_class=CLIPTextConfig) class FlaxCLIPTextModelWithProjectionModule(nn.Module): config: CLIPTextConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.text_model = FlaxCLIPTextTransformer(self.config, dtype=self.dtype) self.text_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype) def __call__(self, input_ids, attention_mask, position_ids, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = text_outputs[1] text_embeds = self.text_projection(pooled_output) if not return_dict: return (text_embeds, text_outputs[0]) + text_outputs[2:] return FlaxCLIPTextModelOutput(text_embeds=text_embeds, last_hidden_state=text_outputs.last_hidden_state, hidden_states=text_outputs.hidden_states, attentions=text_outputs.attentions) class FlaxCLIPTextModelWithProjection(FlaxCLIPTextPreTrainedModel): module_class = FlaxCLIPTextModelWithProjectionModule FLAX_CLIP_TEXT_MODEL_WITH_PROJECTION_DOCSTRING = '\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, FlaxCLIPTextModelWithProjection\n\n >>> model = FlaxCLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")\n >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")\n\n >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np")\n\n >>> outputs = model(**inputs)\n >>> text_embeds = outputs.text_embeds\n ```\n' overwrite_call_docstring(FlaxCLIPTextModelWithProjection, CLIP_TEXT_INPUTS_DOCSTRING + FLAX_CLIP_TEXT_MODEL_WITH_PROJECTION_DOCSTRING) append_replace_return_docstrings(FlaxCLIPTextModelWithProjection, output_type=FlaxCLIPTextModelOutput, config_class=CLIPTextConfig) class FlaxCLIPVisionModule(nn.Module): config: CLIPVisionConfig dtype: jnp.dtype = jnp.float32 def 
setup(self): self.vision_model = FlaxCLIPVisionTransformer(self.config, dtype=self.dtype) def __call__(self, pixel_values, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return self.vision_model(pixel_values=pixel_values, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class FlaxCLIPVisionModel(FlaxCLIPVisionPreTrainedModel): module_class = FlaxCLIPVisionModule FLAX_CLIP_VISION_MODEL_DOCSTRING = '\n Returns:\n\n Example:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoProcessor, FlaxCLIPVisionModel\n\n >>> model = FlaxCLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")\n >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = processor(images=image, return_tensors="np")\n\n >>> outputs = model(**inputs)\n >>> last_hidden_state = outputs.last_hidden_state\n >>> pooler_output = outputs.pooler_output # pooled CLS states\n ```\n' overwrite_call_docstring(FlaxCLIPVisionModel, CLIP_VISION_INPUTS_DOCSTRING + FLAX_CLIP_VISION_MODEL_DOCSTRING) append_replace_return_docstrings(FlaxCLIPVisionModel, output_type=FlaxBaseModelOutputWithPooling, config_class=CLIPVisionConfig) class FlaxCLIPModule(nn.Module): config: CLIPConfig dtype: jnp.dtype = jnp.float32 def setup(self): text_config = self.config.text_config vision_config = self.config.vision_config self.projection_dim = self.config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = FlaxCLIPTextTransformer(text_config, dtype=self.dtype) self.vision_model = FlaxCLIPVisionTransformer(vision_config, dtype=self.dtype) self.visual_projection = nn.Dense(self.projection_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.02), use_bias=False) self.text_projection = nn.Dense(self.projection_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.02), use_bias=False) self.logit_scale = self.param('logit_scale', lambda _, shape: jnp.ones(shape) * self.config.logit_scale_init_value, []) def __call__(self, input_ids=None, pixel_values=None, attention_mask=None, position_ids=None, deterministic: bool=True, output_attentions=None, output_hidden_states=None, return_dict=None): return_dict = return_dict if return_dict is not None else self.config.return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) image_embeds = image_embeds / jnp.linalg.norm(image_embeds, axis=-1, keepdims=True) text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True) logit_scale = jnp.exp(self.logit_scale) logits_per_text = jnp.matmul(text_embeds, image_embeds.T) * logit_scale logits_per_image = logits_per_text.T if not return_dict: return 
(logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return FlaxCLIPOutput(logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) @add_start_docstrings(CLIP_START_DOCSTRING) class FlaxCLIPModel(FlaxCLIPPreTrainedModel): module_class = FlaxCLIPModule FLAX_CLIP_MODEL_DOCSTRING = '\n Returns:\n\n Example:\n\n ```python\n >>> import jax\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoProcessor, FlaxCLIPModel\n\n >>> model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32")\n >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = processor(\n ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="np", padding=True\n ... )\n\n >>> outputs = model(**inputs)\n >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score\n >>> probs = jax.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities\n ```\n' overwrite_call_docstring(FlaxCLIPModel, CLIP_INPUTS_DOCSTRING + FLAX_CLIP_MODEL_DOCSTRING) append_replace_return_docstrings(FlaxCLIPModel, output_type=FlaxCLIPOutput, config_class=CLIPConfig) # File: transformers-main/src/transformers/models/clip/modeling_tf_clip.py """""" from __future__ import annotations import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'openai/clip-vit-base-patch32' LARGE_NEGATIVE = -100000000.0 def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int]=None): src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE def contrastive_loss(logits: tf.Tensor) -> tf.Tensor: return tf.math.reduce_mean(keras.metrics.sparse_categorical_crossentropy(y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True)) def clip_loss(similarity: tf.Tensor) -> tf.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(tf.transpose(similarity)) return (caption_loss + image_loss) / 2.0 @dataclass class TFCLIPOutput(ModelOutput): loss: tf.Tensor | None = None logits_per_image: tf.Tensor = None logits_per_text: tf.Tensor = None text_embeds: tf.Tensor = None image_embeds: tf.Tensor = None text_model_output: TFBaseModelOutputWithPooling = None vision_model_output: TFBaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 
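# the two nested model outputs in this list are converted via their own `to_tuple`; every other field is returned as-is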
'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) class TFCLIPVisionEmbeddings(keras.layers.Layer): def __init__(self, config: CLIPVisionConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.config = config self.patch_embedding = keras.layers.Conv2D(filters=self.embed_dim, kernel_size=self.patch_size, strides=self.patch_size, padding='valid', data_format='channels_last', use_bias=False, kernel_initializer=get_initializer(self.config.initializer_range * self.config.initializer_factor), name='patch_embedding') def build(self, input_shape: tf.TensorShape=None): factor = self.config.initializer_factor self.class_embedding = self.add_weight(shape=(self.embed_dim,), initializer=get_initializer(self.embed_dim ** (-0.5) * factor), trainable=True, name='class_embedding') with tf.name_scope('position_embedding'): self.position_embedding = self.add_weight(shape=(self.num_positions, self.embed_dim), initializer=get_initializer(self.config.initializer_range * factor), trainable=True, name='embeddings') if self.built: return self.built = True if getattr(self, 'patch_embedding', None) is not None: with tf.name_scope(self.patch_embedding.name): self.patch_embedding.build([None, None, None, self.config.num_channels]) def call(self, pixel_values: tf.Tensor) -> tf.Tensor: (batch_size, num_channels, height, width) = shape_list(pixel_values) pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) patch_embeds = self.patch_embedding(pixel_values) patch_embeds = tf.reshape(tensor=patch_embeds, shape=(batch_size, self.num_patches, -1)) class_embeds = tf.broadcast_to(self.class_embedding, shape=(batch_size, 1, self.embed_dim)) embeddings = tf.concat((class_embeds, patch_embeds), axis=1) embeddings = embeddings + self.position_embedding return embeddings class TFCLIPTextEmbeddings(keras.layers.Layer): def __init__(self, config: CLIPTextConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.config = config def build(self, input_shape: tf.TensorShape=None): with tf.name_scope('token_embedding'): self.weight = self.add_weight(shape=(self.config.vocab_size, self.embed_dim), initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), trainable=True, name='weight') with tf.name_scope('position_embedding'): self.position_embedding = self.add_weight(shape=(self.config.max_position_embeddings, self.embed_dim), initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), trainable=True, name='embeddings') super().build(input_shape) def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None) -> tf.Tensor: if input_ids is None and inputs_embeds is None: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embedding, indices=position_ids) position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1)) final_embeddings = inputs_embeds + position_embeds return 
final_embeddings class TFCLIPAttention(keras.layers.Layer): def __init__(self, config: CLIPConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.num_attention_heads = config.num_attention_heads self.attention_head_size = self.embed_dim // self.num_attention_heads if self.attention_head_size * self.num_attention_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_attention_heads}).') factor = config.initializer_factor in_proj_std = self.embed_dim ** (-0.5) * (2 * config.num_hidden_layers) ** (-0.5) * factor out_proj_std = self.embed_dim ** (-0.5) * factor self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.q_proj = keras.layers.Dense(units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name='q_proj') self.k_proj = keras.layers.Dense(units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name='k_proj') self.v_proj = keras.layers.Dense(units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name='v_proj') self.dropout = keras.layers.Dropout(rate=config.attention_dropout) self.out_proj = keras.layers.Dense(units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name='out_proj') def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.q_proj(inputs=hidden_states) mixed_key_layer = self.k_proj(inputs=hidden_states) mixed_value_layer = self.v_proj(inputs=hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if causal_attention_mask is not None: attention_scores = tf.add(attention_scores, causal_attention_mask) if attention_mask is not None: attention_scores = tf.add(attention_scores, attention_mask) _attention_probs = stable_softmax(logits=attention_scores, axis=-1) attention_probs = self.dropout(inputs=_attention_probs, training=training) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim)) attention_output = self.out_proj(attention_output, training=training) outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'q_proj', None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, 'k_proj', None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, 'v_proj', None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, 'out_proj', None) 
is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFCLIPMLP(keras.layers.Layer): def __init__(self, config: CLIPConfig, **kwargs): super().__init__(**kwargs) self.activation_fn = get_tf_activation(config.hidden_act) factor = config.initializer_factor in_proj_std = config.hidden_size ** (-0.5) * (2 * config.num_hidden_layers) ** (-0.5) * factor fc_std = (2 * config.hidden_size) ** (-0.5) * factor self.fc1 = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(fc_std), name='fc1') self.fc2 = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(in_proj_std), name='fc2') self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.fc1(inputs=hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(inputs=hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'fc1', None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.config.hidden_size]) if getattr(self, 'fc2', None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.intermediate_size]) class TFCLIPEncoderLayer(keras.layers.Layer): def __init__(self, config: CLIPConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.self_attn = TFCLIPAttention(config, name='self_attn') self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm1') self.mlp = TFCLIPMLP(config, name='mlp') self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm2') def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: residual = hidden_states hidden_states = self.layer_norm1(inputs=hidden_states) attention_outputs = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training) hidden_states = attention_outputs[0] hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(inputs=hidden_states) hidden_states = self.mlp(hidden_states=hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) + attention_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attn', None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, 'layer_norm1', None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, self.embed_dim]) if getattr(self, 'mlp', None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, 'layer_norm2', None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, self.embed_dim]) class TFCLIPEncoder(keras.layers.Layer): def __init__(self, config: CLIPConfig, **kwargs): super().__init__(**kwargs) self.layers = [TFCLIPEncoderLayer(config, name=f'layers_._{i}') for i in range(config.num_hidden_layers)] def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutput, 
Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for (i, layer_module) in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFCLIPTextTransformer(keras.layers.Layer): def __init__(self, config: CLIPTextConfig, **kwargs): super().__init__(**kwargs) self.embeddings = TFCLIPTextEmbeddings(config, name='embeddings') self.encoder = TFCLIPEncoder(config, name='encoder') self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='final_layer_norm') self.eos_token_id = config.eos_token_id self.embed_dim = config.hidden_size def call(self, input_ids: TFModelInputType, attention_mask: tf.Tensor, position_ids: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: input_shape = shape_list(input_ids) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids) (batch_size, seq_length) = input_shape causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype) attention_mask = _expand_mask(attention_mask) encoder_outputs = self.encoder(hidden_states=embedding_output, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = encoder_outputs[0] sequence_output = self.final_layer_norm(inputs=sequence_output) if self.eos_token_id == 2: pooled_output = tf.gather_nd(params=sequence_output, indices=tf.stack(values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1)) else: pooled_output = tf.gather_nd(params=sequence_output, indices=tf.stack(values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(tf.cast(input_ids == self.eos_token_id, dtype=tf.int8), axis=-1)), axis=1)) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32): diag = tf.cast(tf.fill((seq_length,), 0.0), dtype) to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype) to_mask = tf.linalg.band_part(to_mask, 0, -1) to_mask = tf.linalg.set_diag(to_mask, diagonal=diag) return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length)) def build(self, input_shape=None): if self.built: return 
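# Keras lazy-build pattern: return early if the layer was already built, then mark it as built and
# build each sublayer inside its own tf.name_scope so the created weights keep the sublayer's name
# prefix, keeping variable names consistent with pretrained checkpoints.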
self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'final_layer_norm', None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) @keras_serializable class TFCLIPTextMainLayer(keras.layers.Layer): config_class = CLIPTextConfig def __init__(self, config: CLIPTextConfig, **kwargs): super().__init__(**kwargs) self.config = config self.text_model = TFCLIPTextTransformer(config, name='text_model') def get_input_embeddings(self) -> keras.layers.Layer: return self.text_model.embeddings def set_input_embeddings(self, value: tf.Variable): self.text_model.embeddings.weight = value self.text_model.embeddings.vocab_size = shape_list(value)[0] @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if input_ids is None: raise ValueError('You have to specify input_ids') input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) text_model_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return text_model_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'text_model', None) is not None: with tf.name_scope(self.text_model.name): self.text_model.build(None) class TFCLIPVisionTransformer(keras.layers.Layer): def __init__(self, config: CLIPVisionConfig, **kwargs): super().__init__(**kwargs) self.embeddings = TFCLIPVisionEmbeddings(config, name='embeddings') self.pre_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='pre_layrnorm') self.encoder = TFCLIPEncoder(config, name='encoder') self.post_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='post_layernorm') self.embed_dim = config.hidden_size def call(self, pixel_values: TFModelInputType, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: embedding_output = self.embeddings(pixel_values=pixel_values) embedding_output = self.pre_layernorm(inputs=embedding_output) encoder_outputs = self.encoder(hidden_states=embedding_output, attention_mask=None, causal_attention_mask=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] pooled_output = self.post_layernorm(inputs=pooled_output) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with 
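# A NumPy sketch of the pooling step in the text transformer above: the pooled output is the hidden
# state at the EOS position of each sequence. When `eos_token_id == 2` (older configs) the position
# is recovered as argmax(input_ids), which works for the original CLIP vocabulary because EOS
# carries the highest token id; otherwise the first occurrence of the configured EOS id is used.
# Illustrative only.
import numpy as np

def pool_eos_sketch(sequence_output: np.ndarray, input_ids: np.ndarray, eos_token_id: int) -> np.ndarray:
    if eos_token_id == 2:
        eos_positions = input_ids.argmax(axis=-1)
    else:
        eos_positions = (input_ids == eos_token_id).argmax(axis=-1)
    return sequence_output[np.arange(sequence_output.shape[0]), eos_positions]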
tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'pre_layernorm', None) is not None: with tf.name_scope(self.pre_layernorm.name): self.pre_layernorm.build([None, None, self.embed_dim]) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'post_layernorm', None) is not None: with tf.name_scope(self.post_layernorm.name): self.post_layernorm.build([None, self.embed_dim]) @keras_serializable class TFCLIPVisionMainLayer(keras.layers.Layer): config_class = CLIPVisionConfig def __init__(self, config: CLIPVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.vision_model = TFCLIPVisionTransformer(config, name='vision_model') def get_input_embeddings(self) -> keras.layers.Layer: return self.vision_model.embeddings @unpack_inputs def call(self, pixel_values: TFModelInputType | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if pixel_values is None: raise ValueError('You have to specify pixel_values') vision_model_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return vision_model_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'vision_model', None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) @keras_serializable class TFCLIPMainLayer(keras.layers.Layer): config_class = CLIPConfig def __init__(self, config: CLIPConfig, **kwargs): super().__init__(**kwargs) if not isinstance(config.text_config, CLIPTextConfig): raise TypeError(f'config.text_config is expected to be of type CLIPTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.vision_config, CLIPVisionConfig): raise TypeError(f'config.vision_config is expected to be of type CLIPVisionConfig but is of type {type(config.vision_config)}.') self.config = config text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_model = TFCLIPTextTransformer(text_config, name='text_model') self.vision_model = TFCLIPVisionTransformer(vision_config, name='vision_model') self.visual_projection = keras.layers.Dense(units=self.projection_dim, kernel_initializer=get_initializer(vision_config.hidden_size ** (-0.5) * self.config.initializer_factor), use_bias=False, name='visual_projection') self.text_projection = keras.layers.Dense(units=self.projection_dim, kernel_initializer=get_initializer(text_config.hidden_size ** (-0.5) * self.config.initializer_factor), use_bias=False, name='text_projection') self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size def build(self, input_shape: tf.TensorShape=None): self.logit_scale = self.add_weight(shape=(1,), initializer=keras.initializers.Constant(self.config.logit_scale_init_value), trainable=True, name='logit_scale') if self.built: return self.built = True if getattr(self, 'text_model', None) is not None: with tf.name_scope(self.text_model.name): self.text_model.build(None) if getattr(self, 'vision_model', None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, 'visual_projection', None) is not None: with 
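# A short note on the `logit_scale` weight created in `build` above: it is stored in log space, so
# the factor actually applied to the cosine similarities is exp(logit_scale). With the common
# initial value of 2.6592 that is roughly 1 / 0.07 (about 14.3), i.e. a softmax temperature of 0.07.
import math

assert abs(math.exp(2.6592) - 14.29) < 0.01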
tf.name_scope(self.visual_projection.name): self.visual_projection.build([None, None, self.vision_embed_dim]) if getattr(self, 'text_projection', None) is not None: with tf.name_scope(self.text_projection.name): self.text_projection.build([None, None, self.text_embed_dim]) @unpack_inputs def get_text_features(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor: if input_ids is None: raise ValueError('You have to specify either input_ids') input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) pooled_output = text_outputs[1] text_features = self.text_projection(inputs=pooled_output) return text_features @unpack_inputs def get_image_features(self, pixel_values: TFModelInputType | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor: if pixel_values is None: raise ValueError('You have to specify pixel_values') vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) pooled_output = vision_outputs[1] image_features = self.visual_projection(inputs=pooled_output) return image_features @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, pixel_values: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFCLIPOutput, Tuple[tf.Tensor]]: if input_ids is None: raise ValueError('You have to specify either input_ids') if pixel_values is None: raise ValueError('You have to specify pixel_values') input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(inputs=image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(inputs=text_embeds) image_embeds = image_embeds / tf.norm(tensor=image_embeds, ord='euclidean', axis=-1, keepdims=True) text_embeds = text_embeds / tf.norm(tensor=text_embeds, ord='euclidean', axis=-1, keepdims=True) logit_scale = tf.math.exp(self.logit_scale) logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale logits_per_image = tf.transpose(logits_per_text) loss = None if return_loss: loss = clip_loss(logits_per_text) loss = tf.reshape(loss, (1,)) if not 
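# A NumPy sketch of the similarity computation in `call` above: both embedding sets are
# L2-normalized, then all pairwise dot products are scaled by exp(logit_scale);
# `logits_per_image` is simply the transpose of `logits_per_text`. Illustrative only.
import numpy as np

def clip_logits_sketch(text_embeds: np.ndarray, image_embeds: np.ndarray, logit_scale: float):
    text_embeds = text_embeds / np.linalg.norm(text_embeds, axis=-1, keepdims=True)
    image_embeds = image_embeds / np.linalg.norm(image_embeds, axis=-1, keepdims=True)
    logits_per_text = np.exp(logit_scale) * text_embeds @ image_embeds.T
    return logits_per_text, logits_per_text.T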
return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return (loss,) + output if loss is not None else output return TFCLIPOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) class TFCLIPPreTrainedModel(TFPreTrainedModel): config_class = CLIPConfig base_model_prefix = 'clip' _keys_to_ignore_on_load_missing = ['position_ids'] _keys_to_ignore_on_load_unexpected = ['position_ids'] CLIP_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Args:\n config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' CLIP_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False``):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n' CLIP_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`CLIPImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to\n return the attentions tensors of all attention layers. See `attentions` under returned tensors for more\n detail. This argument can be used only in eager mode, in graph mode the value in the config will be used\n instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False``):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n' CLIP_INPUTS_DOCSTRING = '\n Args:\n input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See\n [`CLIPImageProcessor.__call__`] for details.\n attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False``):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n' class TFCLIPTextModel(TFCLIPPreTrainedModel): config_class = CLIPTextConfig def __init__(self, config: CLIPTextConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.clip = TFCLIPTextMainLayer(config, name='clip') @unpack_inputs @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=CLIPTextConfig) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: outputs = self.clip(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'clip', None) is not None: with tf.name_scope(self.clip.name): self.clip.build(None) class TFCLIPVisionModel(TFCLIPPreTrainedModel): config_class = CLIPVisionConfig main_input_name = 'pixel_values' def __init__(self, config: CLIPVisionConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.clip = TFCLIPVisionMainLayer(config, name='clip') @unpack_inputs @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=CLIPVisionConfig) def call(self, pixel_values: TFModelInputType | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: 
Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: outputs = self.clip(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'clip', None) is not None: with tf.name_scope(self.clip.name): self.clip.build(None) @add_start_docstrings(CLIP_START_DOCSTRING) class TFCLIPModel(TFCLIPPreTrainedModel): config_class = CLIPConfig def __init__(self, config: CLIPConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.clip = TFCLIPMainLayer(config, name='clip') @unpack_inputs @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def get_text_features(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor: text_features = self.clip.get_text_features(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return text_features @unpack_inputs @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) def get_image_features(self, pixel_values: TFModelInputType | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor: image_features = self.clip.get_image_features(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return image_features @unpack_inputs @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TFCLIPOutput, config_class=CLIPConfig) def call(self, input_ids: TFModelInputType | None=None, pixel_values: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFCLIPOutput, Tuple[tf.Tensor]]: outputs = self.clip(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, return_loss=return_loss, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return outputs def serving_output(self, output: TFCLIPOutput) -> TFCLIPOutput: return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'clip', None) is not None: with tf.name_scope(self.clip.name): self.clip.build(None) # File: transformers-main/src/transformers/models/clip/processing_clip.py """""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class CLIPProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] image_processor_class = 'CLIPImageProcessor' tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if 
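# A hedged end-to-end usage sketch for `TFCLIPModel` defined above (assumes network access to
# download the `openai/clip-vit-base-patch32` weights and the example image).
import requests
import tensorflow as tf
from PIL import Image
from transformers import CLIPProcessor, TFCLIPModel

model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True)
outputs = model(**inputs)
probs = tf.nn.softmax(outputs.logits_per_image, axis=1)  # image-text match probabilities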
'feature_extractor' in kwargs: warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning) feature_extractor = kwargs.pop('feature_extractor') image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(image_processor, tokenizer) def __call__(self, text=None, images=None, return_tensors=None, **kwargs): (tokenizer_kwargs, image_processor_kwargs) = ({}, {}) if kwargs: tokenizer_kwargs = {k: v for (k, v) in kwargs.items() if k not in self.image_processor._valid_processor_keys} image_processor_kwargs = {k: v for (k, v) in kwargs.items() if k in self.image_processor._valid_processor_keys} if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.') if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **tokenizer_kwargs) if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors, **image_processor_kwargs) if text is not None and images is not None: encoding['pixel_values'] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def feature_extractor_class(self): warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning) return self.image_processor_class @property def feature_extractor(self): warnings.warn('`feature_extractor` is deprecated and will be removed in v5. 
Use `image_processor` instead.', FutureWarning) return self.image_processor # File: transformers-main/src/transformers/models/clip/tokenization_clip.py """""" import json import os import unicodedata from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} @lru_cache() def bytes_to_unicode(): bs = list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1)) cs = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def whitespace_clean(text): text = re.sub('\\s+', ' ', text) text = text.strip() return text def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens class BasicTokenizer: def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) unicode_normalized_text = unicodedata.normalize('NFC', text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return ''.join(output) def _is_chinese_char(self, cp): if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103): return True return 
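# A standalone sketch mirroring `get_pairs` above: the set of adjacent symbol pairs in the current
# word is what the BPE loop ranks to pick the next merge. `bytes_to_unicode` (also above) maps every
# byte 0-255 to a printable unicode character so this can be done losslessly on arbitrary UTF-8 input.
def get_pairs_sketch(word: tuple) -> set:
    return set(zip(word, word[1:]))

# get_pairs_sketch(("l", "o", "w")) == {("l", "o"), ("o", "w")}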
False def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 65533 or _is_control(char): continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output) class CLIPTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, errors='replace', unk_token='<|endoftext|>', bos_token='<|startoftext|>', eos_token='<|endoftext|>', pad_token='<|endoftext|>', **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token try: import ftfy self.fix_text = ftfy.fix_text except ImportError: logger.info('ftfy or spacy is not installed using custom BasicTokenizer instead of ftfy.') self.nlp = BasicTokenizer(strip_accents=False, do_split_on_punc=False) self.fix_text = None with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for (k, v) in self.encoder.items()} self.errors = errors self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: bpe_merges = merges_handle.read().strip().split('\n')[1:49152 - 256 - 2 + 1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} self.pat = re.compile("<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+", re.IGNORECASE) super().__init__(errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: bos_token = [self.bos_token_id] eos_token = [self.eos_token_id] if token_ids_1 is None: return bos_token + token_ids_0 + eos_token return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1] + [1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: bos_token = [self.bos_token_id] eos_token = [self.eos_token_id] if token_ids_1 is None: return len(bos_token + token_ids_0 + eos_token) * [0] return len(bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token) * [0] def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token[:-1]) + (token[-1] + '</w>',) pairs = get_pairs(word) if not pairs: return token + '</w>' while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not
in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def _tokenize(self, text): bpe_tokens = [] if self.fix_text is None: text = ' '.join(self.nlp.tokenize(text)) else: text = whitespace_clean(self.fix_text(text)).lower() for token in re.findall(self.pat, text): token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8'))) bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' '))) return bpe_tokens def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index) def convert_tokens_to_string(self, tokens): text = ''.join(tokens) byte_array = bytearray([self.byte_decoder[c] for c in text]) text = byte_array.decode('utf-8', errors=self.errors).replace('</w>', ' ').strip() return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error('Vocabulary path ({}) should be a directory'.format(save_directory)) return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merge_file)) index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) # File: transformers-main/src/transformers/models/clip/tokenization_clip_fast.py """""" from typing import List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_clip import CLIPTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} class CLIPTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = CLIPTokenizer def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<|endoftext|>', bos_token='<|startoftext|>', eos_token='<|endoftext|>', pad_token='<|endoftext|>', **kwargs): super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs) if not isinstance(self.backend_tokenizer.pre_tokenizer, pre_tokenizers.Sequence): raise ValueError('The `backend_tokenizer` provided does not match the expected format.
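# A toy, self-contained walk-through of the merge loop in `bpe()` above, using a hand-made
# two-entry merge table (the real table is read from `merges.txt`). The `</w>` suffix marks the end
# of a word so that e.g. "low" and "lowest" receive different merges.
def toy_bpe(token: str, ranks: dict) -> str:
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    while len(word) > 1:
        pairs = set(zip(word, word[1:]))
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

assert toy_bpe("low", {("l", "o"): 0, ("lo", "w</w>"): 1}) == "low</w>"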
The CLIP tokenizer has been heavily modified from transformers version 4.17.0. You need to convert the tokenizer you are using to be compatible with this version.The easiest way to do so is `CLIPTokenizerFast.from_pretrained("path_to_local_folder_or_hub_repo, from_slow=True)`. If you want to use your existing tokenizer, you will have to revert to a version prior to 4.17.0 of transformers.') self._wrap_decode_method_backend_tokenizer() def _wrap_decode_method_backend_tokenizer(self): orig_decode_method = self.backend_tokenizer.decode end_of_word_suffix = self.backend_tokenizer.model.end_of_word_suffix def new_decode_method(*args, **kwargs): text = orig_decode_method(*args, **kwargs) text = text.replace(end_of_word_suffix, ' ').strip() return text self.backend_tokenizer.decode = new_decode_method def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: bos_token = [self.bos_token_id] eos_token = [self.eos_token_id] if token_ids_1 is None: return bos_token + token_ids_0 + eos_token return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: bos_token = [self.bos_token_id] eos_token = [self.eos_token_id] if token_ids_1 is None: return len(bos_token + token_ids_0 + eos_token) * [0] return len(bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token) * [0] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) # File: transformers-main/src/transformers/models/clipseg/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_clipseg': ['CLIPSegConfig', 'CLIPSegTextConfig', 'CLIPSegVisionConfig'], 'processing_clipseg': ['CLIPSegProcessor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_clipseg'] = ['CLIPSegModel', 'CLIPSegPreTrainedModel', 'CLIPSegTextModel', 'CLIPSegVisionModel', 'CLIPSegForImageSegmentation'] if TYPE_CHECKING: from .configuration_clipseg import CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/clipseg/configuration_clipseg.py """""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class CLIPSegTextConfig(PretrainedConfig): model_type = 'clipseg_text_model' def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=77, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=49406, eos_token_id=49407, **kwargs): super().__init__(pad_token_id=pad_token_id, 
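# A sketch of the special-token layout produced by `build_inputs_with_special_tokens` above (the
# slow and fast tokenizers agree): a single sequence becomes [BOS] + ids + [EOS]; a pair is joined
# with a doubled EOS. The default ids shown are the usual CLIP values and are illustrative only.
def with_special_tokens_sketch(ids_a, ids_b=None, bos=49406, eos=49407):
    if ids_b is None:
        return [bos] + ids_a + [eos]
    return [bos] + ids_a + [eos] + [eos] + ids_b + [eos]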
bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'clipseg': config_dict = config_dict['text_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class CLIPSegVisionConfig(PretrainedConfig): model_type = 'clipseg_vision_model' def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'clipseg': config_dict = config_dict['vision_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class CLIPSegConfig(PretrainedConfig): model_type = 'clipseg' def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, extract_layers=[3, 6, 9], reduce_dim=64, decoder_num_attention_heads=4, decoder_attention_dropout=0.0, decoder_hidden_act='quick_gelu', decoder_intermediate_size=2048, conditional_layer=0, use_complex_transposed_convolution=False, **kwargs): text_config_dict = kwargs.pop('text_config_dict', None) vision_config_dict = kwargs.pop('vision_config_dict', None) super().__init__(**kwargs) if text_config_dict is not None: if text_config is None: text_config = {} _text_config_dict = CLIPSegTextConfig(**text_config_dict).to_dict() for (key, value) in _text_config_dict.items(): if key in text_config and value != text_config[key] and (key not in ['transformers_version']): if key in text_config_dict: message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["{key}"]` will be used instead.' else: message = f'`text_config_dict` is provided which will be used to initialize `CLIPSegTextConfig`. The value `text_config["{key}"]` will be overridden.' logger.info(message) text_config.update(_text_config_dict) if vision_config_dict is not None: if vision_config is None: vision_config = {} _vision_config_dict = CLIPSegVisionConfig(**vision_config_dict).to_dict() if 'id2label' in _vision_config_dict: _vision_config_dict['id2label'] = {str(key): value for (key, value) in _vision_config_dict['id2label'].items()} for (key, value) in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and (key not in ['transformers_version']): if key in vision_config_dict: message = f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different values. The value `vision_config_dict["{key}"]` will be used instead.' else: message = f'`vision_config_dict` is provided which will be used to initialize `CLIPSegVisionConfig`. The value `vision_config["{key}"]` will be overridden.' logger.info(message) vision_config.update(_vision_config_dict) if text_config is None: text_config = {} logger.info('`text_config` is `None`. Initializing the `CLIPSegTextConfig` with default values.') if vision_config is None: vision_config = {} logger.info('`vision_config` is `None`. 
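# A hedged usage sketch for the composite config defined here: sub-configs may be passed as plain
# dicts (merged with defaults in this `__init__`) or as config objects via
# `from_text_vision_configs`. Assumes the `transformers` package is installed.
from transformers import CLIPSegConfig

config = CLIPSegConfig(
    text_config={"hidden_size": 512},
    vision_config={"patch_size": 16},
    reduce_dim=64,
)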
initializing the `CLIPSegVisionConfig` with default values.') self.text_config = CLIPSegTextConfig(**text_config) self.vision_config = CLIPSegVisionConfig(**vision_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.extract_layers = extract_layers self.reduce_dim = reduce_dim self.decoder_num_attention_heads = decoder_num_attention_heads self.decoder_attention_dropout = decoder_attention_dropout self.decoder_hidden_act = decoder_hidden_act self.decoder_intermediate_size = decoder_intermediate_size self.conditional_layer = conditional_layer self.initializer_factor = 1.0 self.use_complex_transposed_convolution = use_complex_transposed_convolution @classmethod def from_text_vision_configs(cls, text_config: CLIPSegTextConfig, vision_config: CLIPSegVisionConfig, **kwargs): return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) # File: transformers-main/src/transformers/models/clipseg/convert_clipseg_original_pytorch_to_hf.py """""" import argparse import requests import torch from PIL import Image from transformers import CLIPSegConfig, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig, CLIPTokenizer, ViTImageProcessor def get_clipseg_config(model_name): text_config = CLIPSegTextConfig() vision_config = CLIPSegVisionConfig(patch_size=16) use_complex_transposed_convolution = True if 'refined' in model_name else False reduce_dim = 16 if 'rd16' in model_name else 64 config = CLIPSegConfig.from_text_vision_configs(text_config, vision_config, use_complex_transposed_convolution=use_complex_transposed_convolution, reduce_dim=reduce_dim) return config def rename_key(name): if 'clip_model' in name: name = name.replace('clip_model', 'clip') if 'transformer' in name: if 'visual' in name: name = name.replace('visual.transformer', 'vision_model') else: name = name.replace('transformer', 'text_model') if 'resblocks' in name: name = name.replace('resblocks', 'encoder.layers') if 'ln_1' in name: name = name.replace('ln_1', 'layer_norm1') if 'ln_2' in name: name = name.replace('ln_2', 'layer_norm2') if 'c_fc' in name: name = name.replace('c_fc', 'fc1') if 'c_proj' in name: name = name.replace('c_proj', 'fc2') if 'attn' in name and 'self' not in name: name = name.replace('attn', 'self_attn') if 'token_embedding' in name: name = name.replace('token_embedding', 'text_model.embeddings.token_embedding') if 'positional_embedding' in name and 'visual' not in name: name = name.replace('positional_embedding', 'text_model.embeddings.position_embedding.weight') if 'ln_final' in name: name = name.replace('ln_final', 'text_model.final_layer_norm') if 'visual.class_embedding' in name: name = name.replace('visual.class_embedding', 'vision_model.embeddings.class_embedding') if 'visual.conv1' in name: name = name.replace('visual.conv1', 'vision_model.embeddings.patch_embedding') if 'visual.positional_embedding' in name: name = name.replace('visual.positional_embedding', 'vision_model.embeddings.position_embedding.weight') if 'visual.ln_pre' in name: name = name.replace('visual.ln_pre', 'vision_model.pre_layrnorm') if 'visual.ln_post' in name: name = name.replace('visual.ln_post', 'vision_model.post_layernorm') if 'visual.proj' in name: name = name.replace('visual.proj', 'visual_projection.weight') if 'text_projection' in name: name = name.replace('text_projection', 'text_projection.weight') if 'trans_conv' in name: name = name.replace('trans_conv', 'transposed_convolution') if 'film_mul' in name or 
'film_add' in name or 'reduce' in name or ('transposed_convolution' in name): name = 'decoder.' + name if 'blocks' in name: name = name.replace('blocks', 'decoder.layers') if 'linear1' in name: name = name.replace('linear1', 'mlp.fc1') if 'linear2' in name: name = name.replace('linear2', 'mlp.fc2') if 'norm1' in name and 'layer_' not in name: name = name.replace('norm1', 'layer_norm1') if 'norm2' in name and 'layer_' not in name: name = name.replace('norm2', 'layer_norm2') return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if key.startswith('clip_model') and 'attn.in_proj' in key: key_split = key.split('.') if 'visual' in key: layer_num = int(key_split[4]) dim = config.vision_config.hidden_size prefix = 'vision_model' else: layer_num = int(key_split[3]) dim = config.text_config.hidden_size prefix = 'text_model' if 'weight' in key: orig_state_dict[f'clip.{prefix}.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :] orig_state_dict[f'clip.{prefix}.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[dim:dim * 2, :] orig_state_dict[f'clip.{prefix}.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :] else: orig_state_dict[f'clip.{prefix}.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim] orig_state_dict[f'clip.{prefix}.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim:dim * 2] orig_state_dict[f'clip.{prefix}.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:] elif 'self_attn' in key and 'out_proj' not in key: key_split = key.split('.') layer_num = int(key_split[1]) dim = config.reduce_dim if 'weight' in key: orig_state_dict[f'decoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :] orig_state_dict[f'decoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[dim:dim * 2, :] orig_state_dict[f'decoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :] else: orig_state_dict[f'decoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim] orig_state_dict[f'decoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim:dim * 2] orig_state_dict[f'decoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:] else: new_name = rename_key(key) if 'visual_projection' in new_name or 'text_projection' in new_name: val = val.T orig_state_dict[new_name] = val return orig_state_dict def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) return image def convert_clipseg_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub): config = get_clipseg_config(model_name) model = CLIPSegForImageSegmentation(config) model.eval() state_dict = torch.load(checkpoint_path, map_location='cpu') for key in state_dict.copy().keys(): if key.startswith('model'): state_dict.pop(key, None) state_dict = convert_state_dict(state_dict, config) (missing_keys, unexpected_keys) = model.load_state_dict(state_dict, strict=False) if missing_keys != ['clip.text_model.embeddings.position_ids', 'clip.vision_model.embeddings.position_ids']: raise ValueError('Missing keys that are not expected: {}'.format(missing_keys)) if unexpected_keys != ['decoder.reduce.weight', 'decoder.reduce.bias']: raise ValueError(f'Unexpected keys: {unexpected_keys}') image_processor = ViTImageProcessor(size=352) tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32') processor = CLIPSegProcessor(image_processor=image_processor, tokenizer=tokenizer) image = prepare_img() 
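# Two hedged illustrations of the checkpoint conversion above. First, example key renames produced
# by `rename_key` (derived by applying its replacements in order):
#   clip_model.transformer.resblocks.0.ln_1.weight
#       -> clip.text_model.encoder.layers.0.layer_norm1.weight
#   clip_model.visual.transformer.resblocks.0.mlp.c_fc.weight
#       -> clip.vision_model.encoder.layers.0.mlp.fc1.weight
# Second, the q/k/v split in `convert_state_dict`: the original checkpoint stores the attention
# input projection as one stacked (3 * dim, dim) matrix, cut into three (dim, dim) blocks.
import torch

dim = 4  # illustrative hidden size
in_proj_weight = torch.randn(3 * dim, dim)
q_w, k_w, v_w = in_proj_weight[:dim, :], in_proj_weight[dim:dim * 2, :], in_proj_weight[-dim:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)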
text = ['a glass', 'something to fill', 'wood', 'a jar'] inputs = processor(text=text, images=[image] * len(text), padding='max_length', return_tensors='pt') with torch.no_grad(): outputs = model(**inputs) expected_conditional = torch.tensor([0.111, -0.1882, 0.1645]) expected_pooled_output = torch.tensor([0.2692, -0.7197, -0.1328]) if model_name == 'clipseg-rd64-refined': expected_masks_slice = torch.tensor([[-10.0407, -9.9431, -10.2646], [-9.9751, -9.7064, -9.9586], [-9.6891, -9.5645, -9.9618]]) elif model_name == 'clipseg-rd64': expected_masks_slice = torch.tensor([[-7.2877, -7.2711, -7.2463], [-7.2652, -7.278, -7.252], [-7.2239, -7.2204, -7.2001]]) elif model_name == 'clipseg-rd16': expected_masks_slice = torch.tensor([[-6.3955, -6.4055, -6.4151], [-6.3911, -6.4033, -6.41], [-6.3474, -6.3702, -6.3762]]) else: raise ValueError(f'Model name {model_name} not supported.') assert torch.allclose(outputs.logits[0, :3, :3], expected_masks_slice, atol=0.001) assert torch.allclose(outputs.conditional_embeddings[0, :3], expected_conditional, atol=0.001) assert torch.allclose(outputs.pooled_output[0, :3], expected_pooled_output, atol=0.001) print('Looks ok!') if pytorch_dump_folder_path is not None: print(f'Saving model and processor to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f'Pushing model and processor for {model_name} to the hub') model.push_to_hub(f'CIDAS/{model_name}') processor.push_to_hub(f'CIDAS/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='clipseg-rd64', type=str, choices=['clipseg-rd16', 'clipseg-rd64', 'clipseg-rd64-refined'], help='Name of the model. Supported models are: clipseg-rd64, clipseg-rd16 and clipseg-rd64-refined (rd meaning reduce dimension)') parser.add_argument('--checkpoint_path', default='/Users/nielsrogge/Documents/CLIPSeg/clip_plus_rd64-uni.pth', type=str, help='Path to the original checkpoint. 
Note that the script assumes that the checkpoint includes both CLIP and the decoder weights.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.') args = parser.parse_args() convert_clipseg_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/clipseg/modeling_clipseg.py """""" import copy import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_clipseg import CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'CIDAS/clipseg-rd64-refined' def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def clipseg_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass class CLIPSegOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) @dataclass class CLIPSegDecoderOutput(ModelOutput): logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class CLIPSegImageSegmentationOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None conditional_embeddings: torch.FloatTensor = None pooled_output: torch.FloatTensor = None vision_model_output: BaseModelOutputWithPooling = None decoder_output: CLIPSegDecoderOutput = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['vision_model_output', 'decoder_output'] else getattr(self, k).to_tuple() for k in self.keys())) class CLIPSegVisionEmbeddings(nn.Module): def __init__(self, config: CLIPSegVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer('position_ids', 
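# A small illustration of the symmetric contrastive loss defined above (`contrastive_loss` /
# `clipseg_loss`): the targets are just arange(batch), i.e. the i-th text is the positive match for
# the i-th image, and the loss averages the text->image and image->text directions.
import torch
import torch.nn.functional as F

similarity = torch.tensor([[10.0, 0.0], [0.0, 10.0]])  # near-perfect diagonal match
caption_loss = F.cross_entropy(similarity, torch.arange(2))
image_loss = F.cross_entropy(similarity.t(), torch.arange(2))
loss = (caption_loss + image_loss) / 2.0  # close to zero for this toy input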
torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_position_embeddings(self, new_size): if len(new_size) != 2: raise ValueError('new_size should consist of 2 values') num_patches_one_direction = int(self.num_patches ** 0.5) a = self.position_embedding.weight[1:].T.view(1, self.config.hidden_size, num_patches_one_direction, num_patches_one_direction) b = nn.functional.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(self.config.hidden_size, new_size[0] * new_size[1]).T result = torch.cat([self.position_embedding.weight[:1], b]) return result def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] patch_embeds = self.patch_embedding(pixel_values) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if embeddings.shape[1] != self.num_positions: new_shape = int(math.sqrt(embeddings.shape[1] - 1)) embeddings = embeddings + self.interpolate_position_embeddings((new_shape, new_shape)) embeddings = embeddings.to(embeddings.dtype) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class CLIPSegTextEmbeddings(nn.Module): def __init__(self, config: CLIPSegTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class CLIPSegAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: (bsz, tgt_len, embed_dim) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = 
(bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped) class CLIPSegMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class CLIPSegEncoderLayer(nn.Module): def __init__(self, config: CLIPSegConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = CLIPSegAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = CLIPSegMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) 
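# --- Illustrative sketch (not part of the original module) -------------------------------------
# CLIPSegAttention above flattens the batch and head axes (`proj_shape = (bsz * num_heads, -1,
# head_dim)`) so one torch.bmm call scores every head at once, then views the result back to
# (bsz, num_heads, tgt_len, src_len). A minimal, self-contained demo of that reshape pattern;
# the dimensions and the *_flat names are hypothetical, chosen only for this sketch.
import torch

bsz, num_heads, tgt_len, head_dim = 2, 4, 5, 8
q = torch.randn(bsz, num_heads, tgt_len, head_dim)
k = torch.randn(bsz, num_heads, tgt_len, head_dim)
# flatten (bsz, num_heads) into a single leading dim, as in `proj_shape` above
q_flat = q.reshape(bsz * num_heads, tgt_len, head_dim)
k_flat = k.reshape(bsz * num_heads, tgt_len, head_dim)
attn = torch.bmm(q_flat, k_flat.transpose(1, 2))  # (bsz * num_heads, tgt_len, src_len)
# viewing back recovers exactly the per-head attention scores a batched matmul would give
assert torch.allclose(attn.view(bsz, num_heads, tgt_len, tgt_len), q @ k.transpose(-1, -2))
# ------------------------------------------------------------------------------------------------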
hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class CLIPSegPreTrainedModel(PreTrainedModel): config_class = CLIPSegConfig base_model_prefix = 'clip' supports_gradient_checkpointing = True def _init_weights(self, module): factor = self.config.initializer_factor if isinstance(module, CLIPSegTextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, CLIPSegVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim ** (-0.5) * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, CLIPSegAttention): factor = self.config.initializer_factor in_proj_std = module.embed_dim ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor out_proj_std = module.embed_dim ** (-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, CLIPSegMLP): factor = self.config.initializer_factor in_proj_std = module.config.hidden_size ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor fc_std = (2 * module.config.hidden_size) ** (-0.5) * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, CLIPSegModel): nn.init.normal_(module.text_projection.weight, std=module.text_embed_dim ** (-0.5) * self.config.initializer_factor) nn.init.normal_(module.visual_projection.weight, std=module.vision_embed_dim ** (-0.5) * self.config.initializer_factor) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() CLIPSEG_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`CLIPSegConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' CLIPSEG_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' CLIPSEG_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' CLIPSEG_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class CLIPSegEncoder(nn.Module): def __init__(self, config: CLIPSegConfig): super().__init__() self.config = config self.layers = nn.ModuleList([CLIPSegEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class CLIPSegTextTransformer(nn.Module): def __init__(self, config: CLIPSegTextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = CLIPSegTextEmbeddings(config) self.encoder = CLIPSegEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.eos_token_id = config.eos_token_id @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegTextConfig) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError('You have to specify input_ids') input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) causal_attention_mask = _create_4d_causal_attention_mask(input_shape, hidden_states.dtype, device=hidden_states.device) if attention_mask is not None: 
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) if self.eos_token_id == 2: pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1)] else: pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id).int().argmax(dim=-1)] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class CLIPSegTextModel(CLIPSegPreTrainedModel): config_class = CLIPSegTextConfig _no_split_modules = ['CLIPSegTextEmbeddings', 'CLIPSegEncoderLayer'] def __init__(self, config: CLIPSegTextConfig): super().__init__(config) self.text_model = CLIPSegTextTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegTextConfig) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: return self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class CLIPSegVisionTransformer(nn.Module): def __init__(self, config: CLIPSegVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = CLIPSegVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = CLIPSegEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values) hidden_states = 
self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class CLIPSegVisionModel(CLIPSegPreTrainedModel): config_class = CLIPSegVisionConfig main_input_name = 'pixel_values' def __init__(self, config: CLIPSegVisionConfig): super().__init__(config) self.vision_model = CLIPSegVisionTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) @add_start_docstrings(CLIPSEG_START_DOCSTRING) class CLIPSegModel(CLIPSegPreTrainedModel): config_class = CLIPSegConfig def __init__(self, config: CLIPSegConfig): super().__init__(config) if not isinstance(config.text_config, CLIPSegTextConfig): raise TypeError(f'config.text_config is expected to be of type CLIPSegTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.vision_config, CLIPSegVisionConfig): raise TypeError(f'config.vision_config is expected to be of type CLIPSegVisionConfig but is of type {type(config.vision_config)}.') text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = CLIPSegTextTransformer(text_config) self.vision_model = CLIPSegVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.post_init() @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING) def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, 
return_dict=return_dict) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING) def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = vision_outputs[1] image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(CLIPSEG_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CLIPSegOutput, config_class=CLIPSegConfig) def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CLIPSegOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = clipseg_loss(logits_per_text) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return (loss,) + output if loss is not None else output return CLIPSegOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) class CLIPSegDecoderLayer(nn.Module): def __init__(self, config: CLIPSegConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = CLIPSegAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = CLIPSegMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: 
torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions) hidden_states = residual + hidden_states hidden_states = self.layer_norm1(hidden_states) residual = hidden_states hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states hidden_states = self.layer_norm2(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class CLIPSegDecoder(CLIPSegPreTrainedModel): def __init__(self, config: CLIPSegConfig): super().__init__(config) self.conditional_layer = config.conditional_layer self.film_mul = nn.Linear(config.projection_dim, config.reduce_dim) self.film_add = nn.Linear(config.projection_dim, config.reduce_dim) if config.use_complex_transposed_convolution: transposed_kernels = (config.vision_config.patch_size // 4, config.vision_config.patch_size // 4) self.transposed_convolution = nn.Sequential(nn.Conv2d(config.reduce_dim, config.reduce_dim, kernel_size=3, padding=1), nn.ReLU(), nn.ConvTranspose2d(config.reduce_dim, config.reduce_dim // 2, kernel_size=transposed_kernels[0], stride=transposed_kernels[0]), nn.ReLU(), nn.ConvTranspose2d(config.reduce_dim // 2, 1, kernel_size=transposed_kernels[1], stride=transposed_kernels[1])) else: self.transposed_convolution = nn.ConvTranspose2d(config.reduce_dim, 1, config.vision_config.patch_size, stride=config.vision_config.patch_size) depth = len(config.extract_layers) self.reduces = nn.ModuleList([nn.Linear(config.vision_config.hidden_size, config.reduce_dim) for _ in range(depth)]) decoder_config = copy.deepcopy(config.vision_config) decoder_config.hidden_size = config.reduce_dim decoder_config.num_attention_heads = config.decoder_num_attention_heads decoder_config.intermediate_size = config.decoder_intermediate_size decoder_config.hidden_act = 'relu' self.layers = nn.ModuleList([CLIPSegDecoderLayer(decoder_config) for _ in range(len(config.extract_layers))]) def forward(self, hidden_states: Tuple[torch.Tensor], conditional_embeddings: torch.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=True): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None activations = hidden_states[::-1] output = None for (i, (activation, layer, reduce)) in enumerate(zip(activations, self.layers, self.reduces)): if output is not None: output = reduce(activation) + output else: output = reduce(activation) if i == self.conditional_layer: output = self.film_mul(conditional_embeddings) * output.permute(1, 0, 2) + self.film_add(conditional_embeddings) output = output.permute(1, 0, 2) layer_outputs = layer(output, attention_mask=None, causal_attention_mask=None, output_attentions=output_attentions) output = layer_outputs[0] if output_hidden_states: all_hidden_states += (output,) if output_attentions: all_attentions += (layer_outputs[1],) output = output[:, 1:, :].permute(0, 2, 1) size = int(math.sqrt(output.shape[2])) batch_size = conditional_embeddings.shape[0] output = output.view(batch_size, output.shape[1], size, size) logits = self.transposed_convolution(output).squeeze(1) if not return_dict: return tuple((v for v in [logits, all_hidden_states, all_attentions] if v is not 
None)) return CLIPSegDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_attentions) @add_start_docstrings('\n CLIPSeg model with a Transformer-based decoder on top for zero-shot and one-shot image segmentation.\n ', CLIPSEG_START_DOCSTRING) class CLIPSegForImageSegmentation(CLIPSegPreTrainedModel): config_class = CLIPSegConfig def __init__(self, config: CLIPSegConfig): super().__init__(config) self.config = config self.clip = CLIPSegModel(config) self.extract_layers = config.extract_layers self.decoder = CLIPSegDecoder(config) self.post_init() def get_conditional_embeddings(self, batch_size: int=None, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, conditional_pixel_values: Optional[torch.Tensor]=None): if input_ids is not None: if len(input_ids) != batch_size: raise ValueError('Make sure to pass as many prompt texts as there are query images') with torch.no_grad(): conditional_embeddings = self.clip.get_text_features(input_ids, attention_mask=attention_mask, position_ids=position_ids) elif conditional_pixel_values is not None: if len(conditional_pixel_values) != batch_size: raise ValueError('Make sure to pass as many prompt images as there are query images') with torch.no_grad(): conditional_embeddings = self.clip.get_image_features(conditional_pixel_values) else: raise ValueError('Invalid conditional, should be either provided as `input_ids` or `conditional_pixel_values`') return conditional_embeddings @add_start_docstrings_to_model_forward(CLIPSEG_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CLIPSegImageSegmentationOutput, config_class=CLIPSegTextConfig) def forward(self, input_ids: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, conditional_pixel_values: Optional[torch.FloatTensor]=None, conditional_embeddings: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CLIPSegOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict with torch.no_grad(): vision_outputs = self.clip.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict) pooled_output = self.clip.visual_projection(vision_outputs[1]) hidden_states = vision_outputs.hidden_states if return_dict else vision_outputs[2] activations = [hidden_states[i + 1] for i in self.extract_layers] if return_dict: vision_outputs = BaseModelOutputWithPooling(last_hidden_state=vision_outputs.last_hidden_state, pooler_output=vision_outputs.pooler_output, hidden_states=vision_outputs.hidden_states if output_hidden_states else None, attentions=vision_outputs.attentions) else: vision_outputs = vision_outputs[:2] + vision_outputs[3:] if not output_hidden_states else vision_outputs if conditional_embeddings is None: conditional_embeddings = self.get_conditional_embeddings(batch_size=pixel_values.shape[0], input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, conditional_pixel_values=conditional_pixel_values) else: if conditional_embeddings.shape[0] != pixel_values.shape[0]: raise ValueError('Make sure to pass as many conditional embeddings as there are query images in the batch') if conditional_embeddings.shape[1] != 
self.config.projection_dim: raise ValueError('Make sure that the feature dimension of the conditional embeddings matches `config.projection_dim`.') decoder_outputs = self.decoder(activations, conditional_embeddings, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = decoder_outputs.logits if return_dict else decoder_outputs[0] loss = None if labels is not None: labels = labels.to(logits.device) loss_fn = nn.BCEWithLogitsLoss() loss = loss_fn(logits, labels) if not return_dict: output = (logits, conditional_embeddings, pooled_output, vision_outputs, decoder_outputs) return (loss,) + output if loss is not None else output return CLIPSegImageSegmentationOutput(loss=loss, logits=logits, conditional_embeddings=conditional_embeddings, pooled_output=pooled_output, vision_model_output=vision_outputs, decoder_output=decoder_outputs) # File: transformers-main/src/transformers/models/clipseg/processing_clipseg.py """""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class CLIPSegProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] image_processor_class = 'ViTImageProcessor' tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if 'feature_extractor' in kwargs: warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning) feature_extractor = kwargs.pop('feature_extractor') image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(image_processor, tokenizer) def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs): if text is None and visual_prompt is None and (images is None): raise ValueError('You have to specify either text, visual prompt or images.') if text is not None and visual_prompt is not None: raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.') if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if visual_prompt is not None: prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs) if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) if visual_prompt is not None and images is not None: encoding = {'pixel_values': image_features.pixel_values, 'conditional_pixel_values': prompt_features.pixel_values} return encoding elif text is not None and images is not None: encoding['pixel_values'] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: encoding = {'conditional_pixel_values': prompt_features.pixel_values} return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def feature_extractor_class(self): warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. 
Use `image_processor_class` instead.', FutureWarning) return self.image_processor_class @property def feature_extractor(self): warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning) return self.image_processor # File: transformers-main/src/transformers/models/clvp/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_clvp': ['ClvpConfig', 'ClvpDecoderConfig', 'ClvpEncoderConfig'], 'feature_extraction_clvp': ['ClvpFeatureExtractor'], 'processing_clvp': ['ClvpProcessor'], 'tokenization_clvp': ['ClvpTokenizer']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_clvp'] = ['ClvpModelForConditionalGeneration', 'ClvpForCausalLM', 'ClvpModel', 'ClvpPreTrainedModel', 'ClvpEncoder', 'ClvpDecoder'] if TYPE_CHECKING: from .configuration_clvp import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig from .feature_extraction_clvp import ClvpFeatureExtractor from .processing_clvp import ClvpProcessor from .tokenization_clvp import ClvpTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clvp import ClvpDecoder, ClvpEncoder, ClvpForCausalLM, ClvpModel, ClvpModelForConditionalGeneration, ClvpPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/clvp/configuration_clvp.py """""" import os from typing import TYPE_CHECKING, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class ClvpEncoderConfig(PretrainedConfig): model_type = 'clvp_encoder' def __init__(self, vocab_size=256, hidden_size=768, intermediate_size=1536, projection_dim=768, num_hidden_layers=20, num_attention_heads=12, hidden_act='gelu', layer_norm_eps=1e-05, attention_dropout=0.1, dropout=0.1, use_rotary_embedding=True, use_attention_bias=False, summary_type='mean', initializer_factor=1.0, bos_token_id=255, eos_token_id=0, **kwargs): self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.dropout = dropout self.use_rotary_embedding = use_rotary_embedding self.use_attention_bias = use_attention_bias self.summary_type = summary_type self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], config_type: str='text_config', **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_type not in ['text_config', 'speech_config']: raise ValueError(f"We can only load either 'text_config' or 'speech_config' but you are trying to load{config_type}") if config_dict.get('model_type') == 'clvp': config_dict = config_dict[config_type] if 'model_type' in 
config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class ClvpDecoderConfig(PretrainedConfig): model_type = 'clvp_decoder' def __init__(self, vocab_size=8194, max_position_embeddings=608, max_text_tokens=404, hidden_size=1024, num_hidden_layers=30, num_attention_heads=16, n_inner=None, num_mel_attn_blocks=6, activation_function='gelu_new', resid_pdrop=0.1, embd_pdrop=0.1, attention_dropout=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, use_cache=True, bos_token_id=8192, eos_token_id=8193, feature_size=80, use_attention_bias=True, initializer_factor=1.0, decoder_fixing_codes=[83, 45, 45, 248], **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.max_text_tokens = max_text_tokens self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.n_inner = n_inner self.num_mel_attn_blocks = num_mel_attn_blocks self.activation_function = activation_function self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attention_dropout = attention_dropout self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels self.use_cache = use_cache self.feature_size = feature_size self.use_attention_bias = use_attention_bias self.initializer_factor = initializer_factor self.decoder_fixing_codes = decoder_fixing_codes self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'clvp': config_dict = config_dict['decoder_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class ClvpConfig(PretrainedConfig): model_type = 'clvp' is_composition = True def __init__(self, text_config=None, speech_config=None, decoder_config=None, projection_dim=768, logit_scale_init_value=2.6592, initializer_factor=1.0, **kwargs): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info('`text_config` is `None`. Initializing the `ClvpEncoderConfig` with default values.') if speech_config is None: speech_config = {} logger.info('`speech_config` is `None`. initializing the `ClvpEncoderConfig` with default values.') if decoder_config is None: decoder_config = {} logger.info('`decoder_config` is `None`. 
initializing the `ClvpDecoderConfig` with default values.') self.text_config = ClvpEncoderConfig(**text_config) self.speech_config = ClvpEncoderConfig(**speech_config) self.decoder_config = ClvpDecoderConfig(**decoder_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = initializer_factor @classmethod def from_sub_model_configs(cls, text_config: ClvpEncoderConfig, speech_config: ClvpEncoderConfig, decoder_config: ClvpDecoderConfig, **kwargs): return cls(text_config=text_config.to_dict(), speech_config=speech_config.to_dict(), decoder_config=decoder_config.to_dict(), **kwargs) # File: transformers-main/src/transformers/models/clvp/convert_clvp_to_hf.py """""" import argparse import os import torch from huggingface_hub import hf_hub_download from transformers import ClvpConfig, ClvpModelForConditionalGeneration _MODELS = {'clvp': 'https://huggingface.co/jbetker/tortoise-tts-v2/blob/main/.models/clvp2.pth', 'decoder': 'https://huggingface.co/jbetker/tortoise-tts-v2/blob/main/.models/autoregressive.pth'} dim = 1024 sub_dim = dim // 16 CLVP_ENCODERS_MAPPING = {'text_transformer.transformer.attn_layers': 'text_encoder_model', 'speech_transformer.transformer.attn_layers': 'speech_encoder_model', 'text_transformer.transformer.norm': 'text_encoder_model.final_layer_norm', 'speech_transformer.transformer.norm': 'speech_encoder_model.final_layer_norm', 'to_text_latent': 'text_encoder_model.projection', 'to_speech_latent': 'speech_encoder_model.projection', 'text_emb': 'text_encoder_model.token_embedding', 'speech_emb': 'speech_encoder_model.token_embedding', '1.wrap.net.0': 'mlp.fc1', '1.wrap.net.3': 'mlp.fc2', '1.wrap': 'self_attn', 'to_out': 'out_proj', 'to_q': 'q_proj', 'to_k': 'k_proj', 'to_v': 'v_proj', 'temperature': 'logit_scale'} CLVP_DECODER_MAPPING = {'conditioning_encoder.init': 'conditioning_encoder.mel_conv', 'conditioning_encoder.attn': 'conditioning_encoder.mel_attn_blocks', 'mel_attn_blocks': 'group_norms', '.norm.weight': '.weight', '.norm.bias': '.bias', 'text_embedding': 'conditioning_encoder.text_token_embedding', 'text_pos_embedding.emb': 'conditioning_encoder.text_position_embedding', 'final_norm': 'speech_decoder_model.final_norm', 'mel_head': 'speech_decoder_model.lm_head', 'gpt.ln_f': 'speech_decoder_model.model.decoder.layer_norm', 'mel_embedding': 'speech_decoder_model.model.decoder.input_embeds_layer', 'mel_pos_embedding.emb': 'speech_decoder_model.model.decoder.position_embeds_layer', 'gpt.h': 'speech_decoder_model.model.decoder.layers', 'ln_1': 'input_layernorm', 'ln_2': 'post_attention_layernorm'} def update_index(present_index): if present_index % 2 == 0: return int(present_index / 2) else: return int((present_index - 1) / 2) def convert_encoder_weights(original_weights): converted_weights = {} original_weights_keys = sorted(original_weights.keys()) for original_key in original_weights_keys: updated_key = original_key if '0.0.g' in updated_key: present_index = updated_key.split('.')[4] if int(present_index) % 2 == 0: updated_key = updated_key.replace('0.0.g', 'input_rmsnorm.weight') else: updated_key = updated_key.replace('0.0.g', 'post_attention_rmsnorm.weight') if 'transformer.attn_layers.layers' in updated_key: present_index = updated_key.split('.')[4] updated_index = update_index(int(present_index)) updated_key = updated_key.replace(f'transformer.attn_layers.layers.{present_index}', f'transformer.attn_layers.layers.{updated_index}') for (k, v) in CLVP_ENCODERS_MAPPING.items(): if k in 
updated_key: updated_key = updated_key.replace(k, v) converted_weights[updated_key] = original_weights.pop(original_key) return converted_weights def convert_decoder_weights(original_weights): converted_weights = {} original_weights_keys = sorted(original_weights.keys()) for original_key in original_weights_keys: updated_key = original_key if len(updated_key.split('.')) > 3: (index, attr) = (updated_key.split('.')[2], updated_key.split('.')[-1]) if 'attn.c_attn' in updated_key: if attr == 'weight': (slice1, slice2, slice3) = original_weights[updated_key].squeeze(-1).T.split(split_size=dim, dim=0) else: (slice1, slice2, slice3) = original_weights[updated_key].split(split_size=dim, dim=0) converted_weights[f'speech_decoder_model.model.decoder.layers.{index}.attn.q_proj.{attr}'] = slice1 converted_weights[f'speech_decoder_model.model.decoder.layers.{index}.attn.k_proj.{attr}'] = slice2 converted_weights[f'speech_decoder_model.model.decoder.layers.{index}.attn.v_proj.{attr}'] = slice3 continue if 'attn.c_proj' in updated_key: converted_weights[f'speech_decoder_model.model.decoder.layers.{index}.attn.out_proj.{attr}'] = original_weights[updated_key].squeeze(-1).T continue if 'attn.bias' in updated_key or 'attn.masked_bias' in updated_key or 'text_head' in updated_key: original_weights.pop(updated_key) continue if 'qkv' in updated_key: if attr == 'weight': (slice1, slice2, slice3) = original_weights[updated_key].squeeze(-1).split(split_size=dim, dim=0) else: (slice1, slice2, slice3) = original_weights[updated_key].split(split_size=dim, dim=0) indices = torch.arange(dim) (index1, index2, index3) = (indices.unfold(0, sub_dim, sub_dim * 3).flatten(), indices[sub_dim:].unfold(0, sub_dim, sub_dim * 3).flatten(), indices[2 * sub_dim:].unfold(0, sub_dim, sub_dim * 3).flatten()) converted_weights[f'conditioning_encoder.mel_attn_blocks.{index}.q_proj.{attr}'] = torch.concatenate([slice1[index1], slice2[index3], slice3[index2]], axis=0) converted_weights[f'conditioning_encoder.mel_attn_blocks.{index}.k_proj.{attr}'] = torch.concatenate([slice1[index2], slice2[index1], slice3[index3]], axis=0) converted_weights[f'conditioning_encoder.mel_attn_blocks.{index}.v_proj.{attr}'] = torch.concatenate([slice1[index3], slice2[index2], slice3[index1]], axis=0) continue if 'proj_out' in updated_key: converted_weights[f'conditioning_encoder.mel_attn_blocks.{index}.out_proj.{attr}'] = original_weights[updated_key].squeeze(-1) continue for (k, v) in CLVP_DECODER_MAPPING.items(): if k in updated_key: updated_key = updated_key.replace(k, v) converted_weights[updated_key] = original_weights.pop(original_key) return converted_weights def _download(url: str, root: str): repo_id = f"{url.split('/')[3]}/{url.split('/')[4]}" filename = f"{url.split('/')[-2]}/{url.split('/')[-1]}" hf_hub_download(repo_id=repo_id, filename=filename, force_filename=root, local_dir_use_symlinks=False) def convert_clvp_weights(checkpoint_path, pytorch_dump_folder_path): converted_checkpoint = {} for (each_model_name, each_model_url) in _MODELS.items(): each_model_path = os.path.join(checkpoint_path, each_model_url.split('/')[-1]) if not os.path.exists(each_model_path): print(f'\n{each_model_name} was not found! 
Downloading it to {each_model_path}') _download(url=each_model_url, root=each_model_path) if each_model_name == 'clvp': clvp_checkpoint = torch.load(each_model_path, map_location='cpu') else: decoder_checkpoint = torch.load(each_model_path, map_location='cpu') converted_checkpoint.update(**convert_encoder_weights(clvp_checkpoint)) converted_checkpoint.update(**convert_decoder_weights(decoder_checkpoint)) config = ClvpConfig.from_pretrained('susnato/clvp_dev') model = ClvpModelForConditionalGeneration(config) model.load_state_dict(converted_checkpoint, strict=True) model.save_pretrained(pytorch_dump_folder_path) print(f'Model saved at {pytorch_dump_folder_path}!') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, help='Path to the folder of downloaded checkpoints. (Please enter full path)') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model. (Please enter full path)') args = parser.parse_args() convert_clvp_weights(args.checkpoint_path, args.pytorch_dump_folder_path) # File: transformers-main/src/transformers/models/clvp/feature_extraction_clvp.py """""" from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging logger = logging.get_logger(__name__) class ClvpFeatureExtractor(SequenceFeatureExtractor): model_input_names = ['input_features', 'attention_mask'] def __init__(self, feature_size=80, sampling_rate=22050, default_audio_length=6, hop_length=256, chunk_length=30, n_fft=1024, padding_value=0.0, mel_norms=None, return_attention_mask=False, **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs) self.n_fft = n_fft self.hop_length = hop_length self.chunk_length = chunk_length self.n_samples = chunk_length * sampling_rate self.nb_max_frames = self.n_samples // hop_length self.sampling_rate = sampling_rate self.default_audio_length = default_audio_length self.mel_norms = mel_norms self.mel_filters = mel_filter_bank(num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8000.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='htk') def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray: log_spec = spectrogram(waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel=None) log_spec = np.log(np.clip(log_spec, a_min=1e-05, a_max=None)) if self.mel_norms is not None: log_spec = log_spec / np.array(self.mel_norms)[:, None] return log_spec def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], sampling_rate: Optional[int]=None, truncation: bool=True, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_attention_mask: Optional[bool]=True, padding: Optional[str]='max_length', max_length: Optional[int]=None, **kwargs) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError(f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a sampling rate of {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning('It is strongly recommended to pass the `sampling_rate` argument to this function. Failing to do so can result in silent errors that might be hard to debug.') is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}') is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list))) if is_batched: raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech] elif not is_batched and (not isinstance(raw_speech, np.ndarray)): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) if not is_batched: raw_speech = [np.asarray([raw_speech]).T] batched_speech = BatchFeature({'input_features': raw_speech}) max_length = self.default_audio_length * self.sampling_rate if max_length is None else max_length padded_inputs = self.pad(batched_speech, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask) input_features = padded_inputs.get('input_features').transpose(2, 0, 1) input_features = [self._np_extract_fbank_features(waveform).astype(np.float32) for waveform in input_features[0]] if isinstance(input_features[0], List): padded_inputs['input_features'] = [np.asarray(feature) for feature in input_features] else: padded_inputs['input_features'] = input_features return padded_inputs.convert_to_tensors(return_tensors) # File: transformers-main/src/transformers/models/clvp/modeling_clvp.py """""" import copy import math from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...generation import GenerationConfig from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, CausalLMOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel, SequenceSummary from ...pytorch_utils import Conv1D, isin_mps_friendly from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_clvp import ClvpConfig, ClvpDecoderConfig, ClvpEncoderConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'susnato/clvp_dev' def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def clvp_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) speech_loss = contrastive_loss(similarity.t()) return (caption_loss + speech_loss) / 2.0 def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, v, cos, sin, position_ids, unsqueeze_dim=1): cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = q * cos + rotate_half(q) * sin k_embed = k * cos + 
rotate_half(k) * sin v_embed = v * cos + rotate_half(v) * sin return (q_embed, k_embed, v_embed) def _pad_extra_bos_eos_tokens(input_ids, attention_mask=None, pad_token_id=0, bos_token_id=255, eos_token_id=0, add_bos_token=True, add_eos_token=True): if add_bos_token: input_ids = torch.nn.functional.pad(input_ids, (1, 0), value=bos_token_id) attention_mask = torch.nn.functional.pad(attention_mask, (1, 0), value=1) if attention_mask is not None else attention_mask modified_input_ids = input_ids if add_eos_token: modified_input_ids = torch.zeros((input_ids.shape[0], input_ids.shape[1] + 1), dtype=input_ids.dtype, device=input_ids.device) for (i, each_input_id) in enumerate(input_ids): if isin_mps_friendly(each_input_id, pad_token_id).sum(): pos = torch.where(each_input_id == pad_token_id)[0].min() modified_input_ids[i] = torch.concatenate([each_input_id[:pos], torch.tensor([eos_token_id], device=input_ids.device), each_input_id[pos:]]) else: modified_input_ids[i] = torch.nn.functional.pad(each_input_id, (0, 1), value=eos_token_id) attention_mask = torch.nn.functional.pad(attention_mask, (1, 0), value=1) if attention_mask is not None else attention_mask return (modified_input_ids, attention_mask) @dataclass class ClvpEncoderOutput(ModelOutput): embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ClvpOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None speech_ids: Optional[torch.LongTensor] = None logits_per_speech: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None speech_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None speech_model_output: BaseModelOutputWithPooling = None decoder_hidden_states: torch.FloatTensor = None text_encoder_hidden_states: torch.FloatTensor = None speech_encoder_hidden_states: torch.FloatTensor = None class ClvpRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}' class ClvpRotaryPositionalEmbedding(nn.Module): def __init__(self, config): super().__init__() dim = max(config.projection_dim // (config.num_attention_heads * 2), 32) inv_freq = 1.0 / 10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim) self.register_buffer('inv_freq', inv_freq) self.cached_sequence_length = None self.cached_rotary_positional_embedding = None def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: sequence_length = hidden_states.shape[1] if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None: return self.cached_rotary_positional_embedding self.cached_sequence_length = sequence_length time_stamps = torch.arange(sequence_length, device=hidden_states.device).type_as(self.inv_freq) freqs = torch.einsum('i,j->ij', time_stamps, self.inv_freq) embeddings = torch.cat((freqs, freqs), dim=-1) 
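# Descriptive comments (added): `freqs` is the outer product of the time steps with `inv_freq`, shape (sequence_length, rotary_dim // 2).
# Duplicating it along the last axis yields the full rotary table of shape (sequence_length, rotary_dim); its cos()/sin() halves are what
# `apply_rotary_pos_emb` above uses to rotate the first `rotary_dim` channels of the query/key/value heads.
# The unsqueeze(0) below adds a batch axis so the cached table broadcasts over the batch, and the cache is reused
# for as long as the incoming sequence length stays unchanged.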
self.cached_rotary_positional_embedding = embeddings.unsqueeze(0) return self.cached_rotary_positional_embedding class ClvpSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout if hasattr(config, 'max_position_embeddings'): max_positions = config.max_position_embeddings bias = torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)) bias = bias.view(1, 1, max_positions, max_positions) self.register_buffer('bias', bias, persistent=False) self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.FloatTensor, rotary_pos_emb: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, use_cache: Optional[bool]=False, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]: if rotary_pos_emb is not None and position_ids is None: raise ValueError('`position_ids` must be provided when `rotary_pos_emb` is not None.') (bsz, _, embed_dim) = hidden_states.size() query_states = self._shape(self.q_proj(hidden_states), -1, bsz) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if past_key_value is not None: (past_key, past_value) = past_key_value key_states = torch.cat((past_key, key_states), dim=-2) value_states = torch.cat((past_value, value_states), dim=-2) if use_cache is True: present = (key_states, value_states) else: present = None if rotary_pos_emb is not None: rotary_emb_dim = rotary_pos_emb.shape[-1] (query_rot, query_pass) = (query_states[..., :rotary_emb_dim], query_states[..., rotary_emb_dim:]) (key_rot, key_pass) = (key_states[..., :rotary_emb_dim], key_states[..., rotary_emb_dim:]) (value_rot, value_pass) = (value_states[..., :rotary_emb_dim], value_states[..., rotary_emb_dim:]) (cos, sin) = (rotary_pos_emb.cos().squeeze(0), rotary_pos_emb.sin().squeeze(0)) (query_rot, key_rot, value_rot) = apply_rotary_pos_emb(query_rot, key_rot, value_rot, cos, sin, position_ids) query_states = torch.cat((query_rot, query_pass), dim=-1) key_states = torch.cat((key_rot, key_pass), dim=-1) value_states = torch.cat((value_rot, value_pass), dim=-1) tgt_len = query_states.shape[2] src_len = key_states.shape[2] attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights 
= attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) if head_mask is not None: attn_weights = attn_weights * head_mask attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_probs, value_states) if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, present, attn_weights) class ClvpGatedLinearUnit(nn.Module): def __init__(self, config): super().__init__() self.activation_fn = ACT2FN[config.hidden_act] self.proj = nn.Linear(config.hidden_size, config.intermediate_size * 2) def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: (hidden_states, gate) = self.proj(hidden_states).chunk(2, dim=-1) return hidden_states * self.activation_fn(gate) class ClvpEncoderMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.fc1 = ClvpGatedLinearUnit(config) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout_layer = nn.Dropout(config.dropout) def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: hidden_states = self.fc1(hidden_states) hidden_states = self.dropout_layer(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class ClvpEncoderLayer(nn.Module): def __init__(self, config: ClvpConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.self_attn = ClvpSelfAttention(config) self.mlp = ClvpEncoderMLP(config) self.input_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps) self.post_attention_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.FloatTensor, rotary_pos_emb: torch.FloatTensor, attention_mask: torch.LongTensor, position_ids: torch.LongTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.input_rmsnorm(hidden_states) attention_outputs = self.self_attn(hidden_states=hidden_states, rotary_pos_emb=rotary_pos_emb, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions) hidden_states = attention_outputs[0] hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_rmsnorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[-1],) return outputs class ClvpDecoderMLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = Conv1D(intermediate_size, embed_dim) self.c_proj = Conv1D(embed_dim, intermediate_size) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class ClvpDecoderLayer(nn.Module): def __init__(self, config): super().__init__() hidden_size = 
config.hidden_size inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.input_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = ClvpSelfAttention(config) self.post_attention_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = ClvpDecoderMLP(inner_dim, config) def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]], past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) attn_outputs = self.attn(hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions) attn_output = attn_outputs[0] outputs = attn_outputs[1:] hidden_states = attn_output + residual residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = residual + feed_forward_hidden_states if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs class ClvpConditioningEncoder(nn.Module): def __init__(self, config: ClvpConfig): super().__init__() self.text_config = config.text_config self.decoder_config = config.decoder_config self.text_token_embedding = nn.Embedding(self.text_config.vocab_size, self.decoder_config.hidden_size) self.text_position_embedding = nn.Embedding(self.decoder_config.max_text_tokens, self.decoder_config.hidden_size) self.mel_conv = nn.Conv1d(self.decoder_config.feature_size, self.decoder_config.hidden_size, kernel_size=1) num_groups = self.compute_groupnorm_groups(self.decoder_config.hidden_size) self.group_norms = nn.ModuleList([nn.GroupNorm(num_groups, self.decoder_config.hidden_size, eps=1e-05, affine=True) for _ in range(self.decoder_config.num_mel_attn_blocks)]) self.mel_attn_blocks = nn.ModuleList([ClvpSelfAttention(self.decoder_config) for _ in range(self.decoder_config.num_mel_attn_blocks)]) self.gradient_checkpointing = False def compute_groupnorm_groups(self, channels: int, groups: int=32): if channels <= 16: groups = 8 elif channels <= 64: groups = 16 while channels % groups != 0: groups = int(groups / 2) if groups <= 2: raise ValueError(f'Number of groups for the GroupNorm must be greater than 2, but it is {groups}.Please consider using a different `hidden_size`') return groups def forward(self, input_features: torch.FloatTensor, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: (batch_size, seq_length) = input_ids.size() elif inputs_embeds is not None: (batch_size, seq_length) = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if attention_mask is None: attention_mask = torch.ones([batch_size, seq_length], dtype=torch.long, device=input_ids.device) (input_ids, attention_mask) = _pad_extra_bos_eos_tokens(input_ids, attention_mask, 
bos_token_id=self.text_config.bos_token_id, eos_token_id=self.text_config.eos_token_id) inputs_embeds = self.text_token_embedding(input_ids) position_ids = attention_mask.cumsum(-1) - 1 position_embeds = self.text_position_embedding(position_ids) text_embeds = inputs_embeds + position_embeds if self.gradient_checkpointing and self.training: mel_spec = torch.utils.checkpoint.checkpoint(self.mel_conv, input_features) for (i, mel_attn_block) in enumerate(self.mel_attn_blocks): residual_mel_spec = mel_spec.transpose(1, 2) mel_spec = torch.utils.checkpoint.checkpoint(self.group_norms[i], mel_spec).transpose(1, 2) mel_spec = torch.utils.checkpoint.checkpoint(mel_attn_block, mel_spec)[0] + residual_mel_spec mel_spec = mel_spec.transpose(1, 2) else: mel_spec = self.mel_conv(input_features) for (i, mel_attn_block) in enumerate(self.mel_attn_blocks): residual_mel_spec = mel_spec.transpose(1, 2) mel_spec = self.group_norms[i](mel_spec).transpose(1, 2) mel_spec = mel_attn_block(mel_spec)[0] + residual_mel_spec mel_spec = mel_spec.transpose(1, 2) mel_spec = mel_spec[:, :, 0] mel_spec = mel_spec.unsqueeze(1) if text_embeds.shape[0] == 1 and mel_spec.shape[0] != 1: text_embeds = text_embeds.repeat(mel_spec.shape[0], 1, 1) elif text_embeds.shape[0] != 1 and mel_spec.shape[0] == 1: mel_spec = mel_spec.repeat(text_embeds.shape[0], 1, 1) elif text_embeds.shape[0] != mel_spec.shape[0]: raise ValueError(f'The number of texts and number of audios must be same. Found {text_embeds.shape[0]} texts vs {mel_spec.shape[0]} audios') return torch.concat([mel_spec, text_embeds], dim=1) class ClvpPreTrainedModel(PreTrainedModel): config_class = ClvpConfig base_model_prefix = 'clvp' supports_gradient_checkpointing = True _skip_keys_device_placement = 'past_key_values' def _init_weights(self, module): factor = self.config.initializer_factor if isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, (nn.Linear, Conv1D, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=factor * 0.02) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, ClvpEncoderMLP): factor = self.config.initializer_factor in_proj_std = module.config.hidden_size ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor fc_std = (2 * module.config.hidden_size) ** (-0.5) * factor nn.init.normal_(module.fc1.proj.weight if getattr(module.fc1, 'proj') else module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, ClvpEncoder): config = self.config.get_text_config() factor = config.initializer_factor module.projection.weight.data.normal_(mean=0.0, std=factor * config.hidden_size ** (-0.5)) elif isinstance(module, ClvpConditioningEncoder): module.mel_conv.weight.data.normal_(mean=0.0, std=factor) module.mel_conv.bias.data.zero_() elif isinstance(module, ClvpForCausalLM): for (name, p) in module.named_parameters(): if name == 'c_proj.weight': p.data.normal_(mean=0.0, std=self.config.initializer_range / math.sqrt(2 * self.config.num_hidden_layers)) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) CLVP_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`ClvpConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' CLVP_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`):\n Indicates log mel-spectrogram representations for audio returned by [`ClvpFeatureExtractor`].\n conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):\n inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.\n text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):\n inputs_embeds for the text encoder model passed in place of `input_ids`.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding text token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' CLVP_DECODER_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):\n Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have\n their past given to this model should not be passed as `input_ids` as they have already been computed.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for\n `past_key_values`. In other words, the `attention_mask` always has to have the length:\n `len(past_key_values) + len(input_ids)`\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see\n `past_key_values`).\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" class ClvpEncoder(ClvpPreTrainedModel): def __init__(self, config: ClvpConfig): super().__init__(config) self.config = config self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size) self.rotary_pos_emb = ClvpRotaryPositionalEmbedding(config) if config.use_rotary_embedding else None self.layers = nn.ModuleList([ClvpEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.sequence_summary = SequenceSummary(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.token_embedding def set_input_embeddings(self, value): self.token_embedding = value def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) inputs_embeds = self.token_embedding(input_ids) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange(input_shape[1], dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None rotary_pos_emb = self.rotary_pos_emb(inputs_embeds) if self.rotary_pos_emb is not None else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = torch.utils.checkpoint.checkpoint(encoder_layer.__call__, hidden_states, rotary_pos_emb, attention_mask, position_ids) else: layer_outputs = encoder_layer(hidden_states, rotary_pos_emb, attention_mask, position_ids, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) last_hidden_state = hidden_states last_hidden_state = self.final_layer_norm(last_hidden_state) pooled_output = self.sequence_summary(last_hidden_state) embeds = self.projection(pooled_output) if 
not return_dict: return tuple((v for v in [embeds, last_hidden_state, pooled_output, encoder_states, all_attentions] if v is not None)) return ClvpEncoderOutput(embeds=embeds, last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_states, attentions=all_attentions) class ClvpDecoder(ClvpPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.input_embeds_layer = nn.Embedding(self.config.vocab_size, self.config.hidden_size) self.position_embeds_layer = nn.Embedding(self.config.max_position_embeddings, self.config.hidden_size) self.drop = nn.Dropout(self.config.embd_pdrop) self.layers = nn.ModuleList([ClvpDecoderLayer(self.config) for _ in range(self.config.num_hidden_layers)]) self.layer_norm = nn.LayerNorm(self.config.hidden_size, eps=self.config.layer_norm_epsilon) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.input_embeds_layer def set_input_embeddings(self, new_embeddings): self.input_embeds_layer = new_embeddings def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.layers[layer].attn.prune_heads(heads) @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] inputs_embeds.shape[0] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if past_key_values is None: past_key_values_length = 0 past_key_values = tuple([None] * len(self.layers)) else: past_key_values_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_key_values_length, input_shape[-1] + past_key_values_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) if inputs_embeds is None: inputs_embeds = self.input_embeds_layer(input_ids) position_embeds = self.position_embeds_layer(position_ids) inputs_embeds = inputs_embeds + position_embeds attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, 
inputs_embeds, past_key_values_length) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) hidden_states = inputs_embeds if token_type_ids is not None: token_type_embeds = self.input_embeds_layer(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for (i, (block, past_key_value)) in enumerate(zip(self.layers, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = torch.utils.checkpoint.checkpoint(block.__call__, hidden_states, None, attention_mask, position_ids, head_mask[i]) else: outputs = block(hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.view(output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) @add_start_docstrings('The bare Clvp decoder model outputting raw hidden-states without any specific head on top.', CLVP_START_DOCSTRING) class ClvpModel(ClvpPreTrainedModel): def __init__(self, config: ClvpDecoderConfig): super().__init__(config) self.config = config self.decoder = ClvpDecoder(self.config) self.post_init() def get_input_embeddings(self): return self.decoder.input_embeds_layer def set_input_embeddings(self, value): self.decoder.input_embeds_layer = value def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states 
is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict decoder_outputs = self.decoder(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return decoder_outputs return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions) @add_start_docstrings('The CLVP decoder model with a language modelling head on top.', CLVP_START_DOCSTRING) class ClvpForCausalLM(ClvpPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.model = ClvpModel(self.config) self.final_norm = nn.LayerNorm(self.config.hidden_size) self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=True) self.post_init() def get_input_embeddings(self): return self.model.decoder.input_embeds_layer def set_input_embeddings(self, new_embeddings): self.model.decoder.input_embeds_layer = new_embeddings def _prepare_model_inputs(self, inputs: Optional[torch.Tensor]=None, bos_token_id: Optional[int]=None, model_kwargs: Optional[Dict[str, torch.Tensor]]=None) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]: input_name = self.main_input_name model_kwargs = {k: v for (k, v) in model_kwargs.items() if v is not None} inputs_kwarg = model_kwargs.pop(input_name, None) if inputs_kwarg is not None and inputs is not None: raise ValueError(f'`inputs`: {inputs} were passed alongside {input_name} which is not allowed. Make sure to either pass {inputs} or {input_name}=...') elif inputs_kwarg is not None: inputs = inputs_kwarg if input_name == 'input_ids' and 'inputs_embeds' in model_kwargs: model_kwargs['input_ids'] = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs=model_kwargs) (inputs, input_name) = (model_kwargs['inputs_embeds'], 'inputs_embeds') conditioning_embeds = model_kwargs.get('conditioning_embeds', None) if conditioning_embeds is not None: mel_start_token_embedding = self.model.decoder.input_embeds_layer(torch.full((conditioning_embeds.shape[0], 1), fill_value=self.config.bos_token_id, device=conditioning_embeds.device)) mel_start_token_embedding += self.model.decoder.position_embeds_layer(torch.full((conditioning_embeds.shape[0], 1), fill_value=0, device=conditioning_embeds.device)) conditioning_embeds = torch.concat([conditioning_embeds, mel_start_token_embedding], dim=1) if 'attention_mask' in model_kwargs: position_ids = model_kwargs['attention_mask'].long().cumsum(-1) - 1 else: position_ids = torch.arange(0, conditioning_embeds.shape[1], dtype=torch.long, device=conditioning_embeds.device) position_ids = position_ids.unsqueeze(0).repeat(conditioning_embeds.shape[0], 1) model_kwargs['inputs_embeds'] = conditioning_embeds - self.model.decoder.position_embeds_layer(position_ids) model_kwargs['input_ids'] = torch.ones((model_kwargs['inputs_embeds'].shape[0], 1), dtype=torch.long, device=self.device) * self.config.bos_token_id return (model_kwargs['inputs_embeds'], 
'inputs_embeds', model_kwargs) inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs) return (inputs, input_name, model_kwargs) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, conditioning_embeds=None, **kwargs): input_ids_length = input_ids.shape[-1] token_type_ids = kwargs.get('token_type_ids', None) if past_key_values: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -input_ids.shape[1]:] attention_mask = kwargs.get('attention_mask', None) position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) else: position_ids = None if conditioning_embeds is not None and past_key_values is not None: position_ids = torch.tensor([input_ids_length], dtype=torch.long, device=input_ids.device) if inputs_embeds is not None and past_key_values is None: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids} model_inputs.update({'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'position_ids': position_ids, 'token_type_ids': token_type_ids}) return model_inputs @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] lm_logits = self.final_norm(hidden_states) lm_logits = self.lm_head(lm_logits) loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, 
past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) @staticmethod def _reorder_cache(past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past_key_values)) @add_start_docstrings('The composite CLVP model with a text encoder, speech encoder and speech decoder model. The speech decoder model generates the speech_ids from the text and the text encoder and speech encoder work together to filter out the best speech_ids.', CLVP_START_DOCSTRING) class ClvpModelForConditionalGeneration(ClvpPreTrainedModel): config_class = ClvpConfig def __init__(self, config: ClvpConfig): super().__init__(config) if not isinstance(config.text_config, ClvpEncoderConfig): raise TypeError(f'config.text_config is expected to be of type `ClvpEncoderConfig` but is of type {type(config.text_config)}.') if not isinstance(config.speech_config, ClvpEncoderConfig): raise TypeError(f'config.speech_config is expected to be of type `ClvpEncoderConfig` but is of type {type(config.speech_config)}.') if not isinstance(config.decoder_config, ClvpDecoderConfig): raise TypeError(f'config.decoder_config is expected to be of type `ClvpDecoderConfig` but is of type {type(config.decoder_config)}.') self.conditioning_encoder = ClvpConditioningEncoder(config) self.speech_decoder_model = ClvpForCausalLM(config.decoder_config) self.text_encoder_model = ClvpEncoder(config.text_config) self.speech_encoder_model = ClvpEncoder(config.speech_config) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.post_init() def fix_speech_decoder_output(self, speech_ids: torch.LongTensor) -> torch.LongTensor: decoder_fixing_codes = self.config.decoder_config.decoder_fixing_codes speech_ids = speech_ids[:, 1:] stop_token_indices = torch.where(speech_ids == self.speech_decoder_model.config.eos_token_id, 1, 0) speech_ids = torch.masked_fill(speech_ids, mask=stop_token_indices.bool(), value=decoder_fixing_codes[0]) for (i, each_seq_stop_token_index) in enumerate(stop_token_indices): if each_seq_stop_token_index.sum() == 0: continue stm = each_seq_stop_token_index.argmax() speech_ids[i, stm:] = decoder_fixing_codes[0] if stm - 3 < speech_ids.shape[1]: speech_ids[i, -3:] = torch.tensor([decoder_fixing_codes[1:]], device=speech_ids.device, dtype=torch.long) return speech_ids def get_text_features(self, input_ids: Optional[torch.LongTensor]=None, text_encoder_inputs_embeds: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None) -> torch.FloatTensor: outputs = self.text_encoder_model(input_ids=input_ids, inputs_embeds=text_encoder_inputs_embeds, attention_mask=attention_mask) return outputs[0] def get_speech_features(self, speech_ids: Optional[torch.LongTensor]=None, input_ids: Optional[torch.LongTensor]=None, input_features: Optional[torch.FloatTensor]=None, conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, generation_config: Optional[GenerationConfig]=None, **kwargs) -> torch.FloatTensor: if speech_ids is None: if (input_ids is None and conditioning_encoder_inputs_embeds is None) or input_features is None: raise ValueError('Either speech_ids or input_ids/conditioning_encoder_inputs_embeds and input_features must be provided.') if generation_config is None: 
generation_config = self.generation_config generation_config.update(**kwargs) conditioning_embeds = self.conditioning_encoder(input_features=input_features, input_ids=input_ids, inputs_embeds=conditioning_encoder_inputs_embeds, attention_mask=attention_mask) speech_ids = self.speech_decoder_model.generate(conditioning_embeds=conditioning_embeds, generation_config=generation_config) speech_ids = self.fix_speech_decoder_output(speech_ids[0]) outputs = self.speech_encoder_model(input_ids=speech_ids, attention_mask=attention_mask) return outputs[0] @add_start_docstrings_to_model_forward(CLVP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ClvpOutput, config_class=ClvpConfig) def forward(self, input_ids: torch.LongTensor=None, input_features: torch.FloatTensor=None, conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor]=None, text_encoder_inputs_embeds: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[Tuple, ClvpOutput]: output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict conditioning_embeds = self.conditioning_encoder(input_features=input_features, input_ids=input_ids, inputs_embeds=conditioning_encoder_inputs_embeds, attention_mask=attention_mask) decoder_outputs = self.speech_decoder_model(inputs_embeds=conditioning_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) speech_ids = decoder_outputs[0] if speech_ids.ndim == 3: speech_ids = speech_ids.argmax(2) speech_ids = self.fix_speech_decoder_output(speech_ids) speech_outputs = self.speech_encoder_model(input_ids=speech_ids, output_hidden_states=output_hidden_states, return_dict=return_dict) text_outputs = self.text_encoder_model(input_ids=input_ids, inputs_embeds=text_encoder_inputs_embeds, attention_mask=attention_mask, output_hidden_states=output_hidden_states, return_dict=return_dict) speech_embeds = speech_outputs[0] text_embeds = text_outputs[0] speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale logits_per_speech = logits_per_text.t() loss = None if return_loss: loss = clvp_loss(logits_per_text) if not return_dict: output = (logits_per_speech, logits_per_text, text_embeds, speech_embeds, text_outputs[2], speech_outputs[2]) if output_hidden_states: output += (decoder_outputs[-1], text_outputs[-1], speech_outputs[-1]) return (loss,) + output if loss is not None else output return ClvpOutput(loss=loss, logits_per_speech=logits_per_speech, logits_per_text=logits_per_text, text_embeds=text_embeds, speech_embeds=speech_embeds, text_model_output=text_outputs[2], speech_model_output=speech_outputs[2], decoder_hidden_states=decoder_outputs.hidden_states, text_encoder_hidden_states=text_outputs.hidden_states, speech_encoder_hidden_states=speech_outputs.hidden_states) @torch.no_grad() def generate(self, input_ids: torch.LongTensor=None, input_features: torch.FloatTensor=None, attention_mask: Optional[torch.LongTensor]=None, generation_config: Optional[GenerationConfig]=None, pad_to_max_mel_tokens: Optional[int]=None, output_hidden_states: 
Optional[bool]=None, **kwargs): sequence_length = input_ids.shape[-1] if sequence_length > self.config.decoder_config.max_text_tokens - 3: raise ValueError(f'Maximum sequence length reached! Found input_ids of length {sequence_length}. Please make sure that the maximum length of input_ids is {self.config.decoder_config.max_text_tokens - 3}') if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) generation_config.validate() self._validate_model_kwargs(model_kwargs.copy()) (input_ids, attention_mask) = _pad_extra_bos_eos_tokens(input_ids, attention_mask, add_bos_token=False, bos_token_id=self.config.text_config.bos_token_id, eos_token_id=self.config.text_config.eos_token_id) conditioning_embeds = self.conditioning_encoder(input_features=input_features, input_ids=input_ids, attention_mask=attention_mask) decoder_outputs = self.speech_decoder_model.generate(conditioning_embeds=conditioning_embeds, generation_config=generation_config, output_hidden_states=output_hidden_states, return_dict=generation_config.return_dict_in_generate) if isinstance(decoder_outputs, ModelOutput): speech_ids = decoder_outputs.sequences if pad_to_max_mel_tokens is not None: padding_needed = pad_to_max_mel_tokens - speech_ids.shape[-1] speech_ids = torch.nn.functional.pad(speech_ids, (0, padding_needed), value=self.generation_config.eos_token_id) speech_ids = self.fix_speech_decoder_output(speech_ids) speech_outputs = self.speech_encoder_model(input_ids=speech_ids, output_hidden_states=output_hidden_states, return_dict=generation_config.return_dict_in_generate) text_outputs = self.text_encoder_model(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=output_hidden_states, return_dict=generation_config.return_dict_in_generate) speech_embeds = speech_outputs[0] text_embeds = text_outputs[0] speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale logits_per_speech = logits_per_text.t() if not generation_config.return_dict_in_generate: output = (speech_ids, logits_per_speech, logits_per_text, text_embeds, speech_embeds, text_outputs[2], speech_outputs[2]) if output_hidden_states: output += (decoder_outputs[-1], text_outputs[-1], speech_outputs[-1]) return output return ClvpOutput(speech_ids=speech_ids, logits_per_speech=logits_per_speech, logits_per_text=logits_per_text, text_embeds=text_embeds, speech_embeds=speech_embeds, text_model_output=text_outputs[2], speech_model_output=speech_outputs[2], decoder_hidden_states=decoder_outputs.hidden_states, text_encoder_hidden_states=text_outputs.hidden_states, speech_encoder_hidden_states=speech_outputs.hidden_states) # File: transformers-main/src/transformers/models/clvp/number_normalizer.py """""" import re class EnglishNormalizer: def __init__(self): self._abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [('mrs', 'misess'), ('mr', 'mister'), ('dr', 'doctor'), ('st', 'saint'), ('co', 'company'), ('jr', 'junior'), ('maj', 'major'), ('gen', 'general'), ('drs', 'doctors'), ('rev', 'reverend'), ('lt', 'lieutenant'), ('hon', 'honorable'), ('sgt', 'sergeant'), ('capt', 'captain'), ('esq', 'esquire'), ('ltd', 'limited'), ('col', 'colonel'), ('ft', 'fort')]] self.ones = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine'] self.teens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen'] self.tens = ['', '', 'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety'] def number_to_words(self, num: int) -> str: if num == 0: return 'zero' elif num < 0: return 'minus ' + self.number_to_words(abs(num)) elif num < 10: return self.ones[num] elif num < 20: return self.teens[num - 10] elif num < 100: return self.tens[num // 10] + ('-' + self.number_to_words(num % 10) if num % 10 != 0 else '') elif num < 1000: return self.ones[num // 100] + ' hundred' + (' ' + self.number_to_words(num % 100) if num % 100 != 0 else '') elif num < 1000000: return self.number_to_words(num // 1000) + ' thousand' + (', ' + self.number_to_words(num % 1000) if num % 1000 != 0 else '') elif num < 1000000000: return self.number_to_words(num // 1000000) + ' million' + (', ' + self.number_to_words(num % 1000000) if num % 1000000 != 0 else '') elif num < 1000000000000: return self.number_to_words(num // 1000000000) + ' billion' + (', ' + self.number_to_words(num % 1000000000) if num % 1000000000 != 0 else '') elif num < 1000000000000000: return self.number_to_words(num // 1000000000000) + ' trillion' + (', ' + self.number_to_words(num % 1000000000000) if num % 1000000000000 != 0 else '') elif num < 1000000000000000000: return self.number_to_words(num // 1000000000000000) + ' quadrillion' + (', ' + self.number_to_words(num % 1000000000000000) if num % 1000000000000000 != 0 else '') else: return 'number out of range' def convert_to_ascii(self, text: str) -> str: return text.encode('ascii', 'ignore').decode('utf-8') def _expand_dollars(self, m: str) -> str: match = m.group(1) parts = match.split('.') if len(parts) > 2: return match + ' dollars' dollars = int(parts[0]) if parts[0] else 0 cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 if dollars and cents: dollar_unit = 'dollar' if dollars == 1 else 'dollars' cent_unit = 'cent' if cents == 1 else 'cents' return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) elif dollars: dollar_unit = 'dollar' if dollars == 1 else 'dollars' return '%s %s' % (dollars, dollar_unit) elif cents: cent_unit = 'cent' if cents == 1 else 'cents' return '%s %s' % (cents, cent_unit) else: return 'zero dollars' def _remove_commas(self, m: str) -> str: return m.group(1).replace(',', '') def _expand_decimal_point(self, m: str) -> str: return m.group(1).replace('.', ' point ') def _expand_ordinal(self, num: str) -> str: ordinal_suffixes = {1: 'st', 2: 'nd', 3: 'rd'} num = int(num.group(0)[:-2]) if 10 <= num % 100 and num % 100 <= 20: suffix = 'th' else: suffix = ordinal_suffixes.get(num % 10, 'th') return self.number_to_words(num) + suffix def _expand_number(self, m: str) -> str: num = int(m.group(0)) if num > 1000 and num < 3000: if num == 2000: return 'two thousand' elif num > 2000 and num < 2010: return 'two thousand ' + self.number_to_words(num % 100) elif num % 100 == 0: return self.number_to_words(num // 100) + ' hundred' else: return 
self.number_to_words(num) else: return self.number_to_words(num) def normalize_numbers(self, text: str) -> str: text = re.sub(re.compile('([0-9][0-9\\,]+[0-9])'), self._remove_commas, text) text = re.sub(re.compile('£([0-9\\,]*[0-9]+)'), '\\1 pounds', text) text = re.sub(re.compile('\\$([0-9\\.\\,]*[0-9]+)'), self._expand_dollars, text) text = re.sub(re.compile('([0-9]+\\.[0-9]+)'), self._expand_decimal_point, text) text = re.sub(re.compile('[0-9]+(st|nd|rd|th)'), self._expand_ordinal, text) text = re.sub(re.compile('[0-9]+'), self._expand_number, text) return text def expand_abbreviations(self, text: str) -> str: for (regex, replacement) in self._abbreviations: text = re.sub(regex, replacement, text) return text def collapse_whitespace(self, text: str) -> str: return re.sub(re.compile('\\s+'), ' ', text) def __call__(self, text): text = self.convert_to_ascii(text) text = text.lower() text = self.normalize_numbers(text) text = self.expand_abbreviations(text) text = self.collapse_whitespace(text) text = text.replace('"', '') return text # File: transformers-main/src/transformers/models/clvp/processing_clvp.py """""" from ...processing_utils import ProcessorMixin class ClvpProcessor(ProcessorMixin): feature_extractor_class = 'ClvpFeatureExtractor' tokenizer_class = 'ClvpTokenizer' model_input_names = ['input_ids', 'input_features', 'attention_mask'] def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__(self, *args, **kwargs): raw_speech = kwargs.pop('raw_speech', None) sampling_rate = kwargs.pop('sampling_rate', None) text = kwargs.pop('text', None) if raw_speech is None and text is None: raise ValueError('You need to specify either an `raw_speech` or `text` input to process.') if raw_speech is not None: inputs = self.feature_extractor(raw_speech, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif raw_speech is None: return encodings else: inputs['input_ids'] = encodings['input_ids'] inputs['attention_mask'] = encodings['attention_mask'] return inputs def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) # File: transformers-main/src/transformers/models/clvp/tokenization_clvp.py """""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging from .number_normalizer import EnglishNormalizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} @lru_cache() def bytes_to_unicode(): bs = list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1)) cs = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class ClvpTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, errors='replace', unk_token='[UNK]', bos_token='<|endoftext|>', eos_token='[STOP]', pad_token='[STOP]', add_prefix_space=False, add_bos_token=False, add_eos_token=False, 
**kwargs): bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token self.add_bos_token = add_bos_token self.add_eos_token = add_eos_token self._normalizer = None with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for (k, v) in self.encoder.items()} self.errors = errors self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: bpe_merges = merges_handle.read().split('\n')[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+") super().__init__(errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, add_bos_token=add_bos_token, add_eos_token=add_eos_token, **kwargs) @property def vocab_size(self): return len(self.encoder) @property def normalizer(self): if self._normalizer is None: self._normalizer = EnglishNormalizer() return self._normalizer def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if not self.add_bos_token: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) def _tokenize(self, text): bpe_tokens = [] text = self.normalizer(text) for token in re.findall(self.pat, text): token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8'))) bpe_tokens.extend(('[SPACE]' if 
bpe_token == 'Ġ' and '[SPACE]' in self.encoder.keys() else bpe_token for bpe_token in self.bpe(token).split(' '))) return bpe_tokens def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index) def convert_tokens_to_string(self, tokens): text = ''.join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) return text def clean_up_tokenization(self, text): text = ''.join(text) vocab_tokens = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys()) text = text.replace('[SPACE]', ' ') if '[SPACE]' in vocab_tokens else text text = text.replace('[STOP]', ' ') if '[STOP]' in vocab_tokens else text text = text.replace(self.unk_token, '').replace(' ', ' ').replace(' ', ' ') return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) # File: transformers-main/src/transformers/models/code_llama/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available _import_structure = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_code_llama'] = ['CodeLlamaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_code_llama_fast'] = ['CodeLlamaTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_code_llama import CodeLlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_code_llama_fast import CodeLlamaTokenizerFast else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/code_llama/tokenization_code_llama.py """""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...convert_slow_tokenizer import import_protobuf from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging, requires_backends logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = 
{'vocab_file': 'tokenizer.model'}
SPIECE_UNDERLINE = '▁'
(B_INST, E_INST) = ('[INST]', '[/INST]')
(B_SYS, E_SYS) = ('<<SYS>>\n', '\n<</SYS>>\n\n')
DEFAULT_SYSTEM_PROMPT = "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."

class CodeLlamaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, unk_token='<unk>', bos_token='<s>', eos_token='</s>', prefix_token='▁<PRE>', middle_token='▁<MID>', suffix_token='▁<SUF>', eot_token='▁<EOT>', fill_token='<FILL_ME>', suffix_first=False, sp_model_kwargs: Optional[Dict[str, Any]]=None, add_bos_token=True, add_eos_token=False, clean_up_tokenization_spaces=False, additional_special_tokens=None, use_default_system_prompt=False, **kwargs):
        requires_backends(self, 'protobuf')
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
        self.use_default_system_prompt = use_default_system_prompt
        additional_special_tokens = additional_special_tokens or []
        for token in [prefix_token, middle_token, suffix_token, eot_token]:
            additional_special_tokens += [token] if token is not None else []
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self._prefix_token = prefix_token
        self._middle_token = middle_token
        self._suffix_token = suffix_token
        self._eot_token = eot_token
        self.fill_token = fill_token
        self.suffix_first = suffix_first
        self.sp_model = self.get_spm_processor()
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, prefix_token=prefix_token, middle_token=middle_token, suffix_token=suffix_token, eot_token=eot_token, fill_token=fill_token, sp_model_kwargs=self.sp_model_kwargs, suffix_first=suffix_first, clean_up_tokenization_spaces=clean_up_tokenization_spaces, additional_special_tokens=additional_special_tokens, use_default_system_prompt=use_default_system_prompt, **kwargs)

    @property
    def unk_token_length(self):
        return len(self.sp_model.encode(str(self.unk_token)))

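    # The serialized SentencePiece model is patched before loading: `add_dummy_prefix` is
    # disabled in the normalizer spec so the processor does not silently prepend a leading "▁";
    # prefix handling is instead done explicitly in `tokenize`/`_tokenize` below.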
    def get_spm_processor(self):
        tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        with open(self.vocab_file, 'rb') as f:
            sp_model = f.read()
            model_pb2 = import_protobuf()
            model = model_pb2.ModelProto.FromString(sp_model)
            normalizer_spec = model_pb2.NormalizerSpec()
            normalizer_spec.add_dummy_prefix = False
            model.normalizer_spec.MergeFrom(normalizer_spec)
            sp_model = model.SerializeToString()
            tokenizer.LoadFromSerializedProto(sp_model)
        return tokenizer

    @property
    def prefix_token(self):
        return self._prefix_token

    @property
    def prefix_id(self):
        if self._prefix_token is None:
            return None
        return self.convert_tokens_to_ids(self.prefix_token)

    @property
    def middle_token(self):
        return self._middle_token

    @property
    def middle_id(self):
        if self._middle_token is None:
            return None
        return self.convert_tokens_to_ids(self.middle_token)

    @property
    def suffix_token(self):
        return self._suffix_token

    @property
    def suffix_id(self):
        if self._suffix_token is None:
            return None
        return self.convert_tokens_to_ids(self.suffix_token)

    @property
    def eot_token(self):
        return self._eot_token

    @property
    def eot_id(self):
        if self._eot_token is None:
            return None
        return self.convert_tokens_to_ids(self.eot_token)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

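    # Infilling entry point: if the text contains `fill_token` (and no explicit suffix is
    # passed), it is split into a prefix and a suffix, and the special markers are arranged
    # either as `<PRE> prefix <SUF> suffix <MID>` or, with `suffix_first=True`, as
    # `<PRE> <SUF> suffix <MID> prefix`. Plain text without `fill_token` falls back to regular
    # SentencePiece tokenization.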
    def tokenize(self, prefix, suffix=None, suffix_first=False, **kwargs) -> List[str]:
        if self.fill_token is not None and self.fill_token in prefix and (suffix is None):
            (prefix, suffix) = prefix.split(self.fill_token)
        if len(prefix) > 0:
            prefix = SPIECE_UNDERLINE + prefix.replace(SPIECE_UNDERLINE, ' ')
        if suffix is None or len(suffix) < 1:
            tokens = super().tokenize(prefix, **kwargs)
            if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and (tokens[1] in self.all_special_tokens):
                tokens = tokens[1:]
            return tokens
        prefix_tokens = self._tokenize(prefix)
        if None in (self.prefix_id, self.middle_id, self.suffix_id):
            raise ValueError(f'The input either includes a `prefix` and a `suffix` used for the infilling task, or can be split on the {self.fill_token} token to create a prefix and a suffix, but the model does not support `infilling`.')
        suffix_tokens = self._tokenize(suffix)
        suffix_first = suffix_first if suffix_first is not None else self.suffix_first
        if suffix_first:
            return [self.prefix_token, self.suffix_token] + suffix_tokens + [self.middle_token] + prefix_tokens
        else:
            return [self.prefix_token] + prefix_tokens + [self.suffix_token] + suffix_tokens + [self.middle_token]

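    # With `add_dummy_prefix` disabled, SentencePiece drops a leading "▁"/space from the pieces
    # (e.g. "▁Hey" would come back as ['H', 'e', 'y']). To preserve it, text starting with a space
    # or "▁" is encoded with `unk_token` prepended and the pieces belonging to that unk prefix are
    # stripped off again.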
    def _tokenize(self, text, **kwargs):
        tokens = self.sp_model.encode(text, out_type=str)
        if not text.startswith((SPIECE_UNDERLINE, ' ')):
            return tokens
        tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
        return tokens[self.unk_token_length:] if len(tokens) >= self.unk_token_length else tokens

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        if tokens[0].startswith(SPIECE_UNDERLINE):
            tokens[0] = tokens[0][1:]
        current_sub_tokens = []
        out_string = ''
        for (_, token) in enumerate(tokens):
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str]=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
        output = bos_token_id + token_ids_0 + eos_token_id
        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id
        return output

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []
        if token_ids_1 is None:
            return bos_token_id + [0] * len(token_ids_0) + eos_token_id
        return bos_token_id + [0] * len(token_ids_0) + eos_token_id + bos_token_id + [0] * len(token_ids_1) + eos_token_id

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
        return output

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

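# --- Illustrative sketch (not part of the original file) ---------------------------------------
# A minimal example of infilling with the slow tokenizer above. The checkpoint name is an
# assumption for demonstration; any Code Llama checkpoint that ships `tokenizer.model` works.
def _example_code_llama_infilling():
    from transformers import CodeLlamaTokenizer

    tokenizer = CodeLlamaTokenizer.from_pretrained("codellama/CodeLlama-7b-hf")  # assumed checkpoint
    # "<FILL_ME>" splits the prompt into a prefix and a suffix; tokenize() arranges them as
    # <PRE> prefix <SUF> suffix <MID> so the model generates the missing middle part.
    prompt = 'def remove_non_ascii(s: str) -> str:\n    <FILL_ME>\n    return result'
    tokens = tokenizer.tokenize(prompt)
    print(tokens[:3])  # begins with the infilling markers, e.g. ['▁<PRE>', ...]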
# File: transformers-main/src/transformers/models/code_llama/tokenization_code_llama_fast.py
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import normalizers, processors
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
from ...utils.versions import require_version
require_version('tokenizers>=0.13.3')
if is_sentencepiece_available():
    from .tokenization_code_llama import CodeLlamaTokenizer
else:
    CodeLlamaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'tokenizer.model', 'tokenizer_file': 'tokenizer.json'}
SPIECE_UNDERLINE = '▁'
(B_INST, E_INST) = ('[INST]', '[/INST]')
(B_SYS, E_SYS) = ('<<SYS>>\n', '\n<</SYS>>\n\n')
DEFAULT_SYSTEM_PROMPT = "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."

class CodeLlamaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = CodeLlamaTokenizer
    padding_side = 'left'
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token='<unk>', bos_token='<s>', eos_token='</s>', prefix_token='▁<PRE>', middle_token='▁<MID>', suffix_token='▁<SUF>', eot_token='▁<EOT>', fill_token='<FILL_ME>', additional_special_tokens=None, add_bos_token=True, add_eos_token=False, use_default_system_prompt=False, **kwargs):
        additional_special_tokens = additional_special_tokens or []
        for token in [prefix_token, middle_token, suffix_token, eot_token]:
            additional_special_tokens += [token] if token is not None else []
        self.use_default_system_prompt = use_default_system_prompt
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, additional_special_tokens=additional_special_tokens, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, prefix_token=prefix_token, middle_token=middle_token, suffix_token=suffix_token, eot_token=eot_token, fill_token=fill_token, use_default_system_prompt=use_default_system_prompt, **kwargs)
        self._add_bos_token = add_bos_token
        self._add_eos_token = add_eos_token
        self.update_post_processor()
        self.vocab_file = vocab_file
        self._prefix_token = prefix_token
        self._middle_token = middle_token
        self._suffix_token = suffix_token
        self._eot_token = eot_token
        self.fill_token = fill_token

    @property
    def can_save_slow_tokenizer(self) -> bool:
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

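    # Rebuilds the `tokenizers` TemplateProcessing post-processor whenever `add_bos_token` /
    # `add_eos_token` change, so the "<s> $A </s>"-style templates stay in sync with the flags.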
    def update_post_processor(self):
        bos = self.bos_token
        bos_token_id = self.bos_token_id
        if bos is None and self.add_bos_token:
            raise ValueError('add_bos_token = True but bos_token = None')
        eos = self.eos_token
        eos_token_id = self.eos_token_id
        if eos is None and self.add_eos_token:
            raise ValueError('add_eos_token = True but eos_token = None')
        single = f"{(bos + ':0 ' if self.add_bos_token else '')}$A:0{(' ' + eos + ':0' if self.add_eos_token else '')}"
        pair = f"{single}{(' ' + bos + ':1' if self.add_bos_token else '')} $B:1{(' ' + eos + ':1' if self.add_eos_token else '')}"
        special_tokens = []
        if self.add_bos_token:
            special_tokens.append((bos, bos_token_id))
        if self.add_eos_token:
            special_tokens.append((eos, eos_token_id))
        self._tokenizer.post_processor = processors.TemplateProcessing(single=single, pair=pair, special_tokens=special_tokens)

    @property
    def prefix_token(self):
        return self._prefix_token

    @property
    def prefix_id(self):
        if self._prefix_token is None:
            return None
        return self.convert_tokens_to_ids(self.prefix_token)

    @property
    def middle_token(self):
        return self._middle_token

    @property
    def middle_id(self):
        if self._middle_token is None:
            return None
        return self.convert_tokens_to_ids(self.middle_token)

    @property
    def suffix_token(self):
        return self._suffix_token

    @property
    def suffix_id(self):
        if self._suffix_token is None:
            return None
        return self.convert_tokens_to_ids(self.suffix_token)

    @property
    def eot_id(self):
        if self._eot_token is None:
            return None
        return self.convert_tokens_to_ids(self.eot_token)

    @property
    def eot_token(self):
        return self._eot_token

    @property
    def add_eos_token(self):
        return self._add_eos_token

    @property
    def add_bos_token(self):
        return self._add_bos_token

    @add_eos_token.setter
    def add_eos_token(self, value):
        self._add_eos_token = value
        self.update_post_processor()

    @add_bos_token.setter
    def add_bos_token(self, value):
        self._add_bos_token = value
        self.update_post_processor()

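    # Temporarily swaps the normalizer and post-processor for infilling: the default
    # "prepend ▁ then replace spaces" normalizer is reduced to the space replacement only, and the
    # pair template becomes <PRE> $A <SUF> $B <MID> (or <PRE> <SUF> $B <MID> $A when
    # `suffix_first=True`). Calling it with `reset=True` restores the regular behaviour.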
    def set_infilling_processor(self, reset, suffix_first=False, add_special_tokens=True):
        if reset:
            self._tokenizer.normalizer = normalizers.Sequence([normalizers.Prepend(prepend='▁'), normalizers.Replace(pattern=' ', content='▁')])
            self.update_post_processor()
            return
        self._tokenizer.normalizer = normalizers.Replace(pattern=' ', content='▁')
        pair = [self.bos_token] if self.add_bos_token and add_special_tokens else []
        special_tokens = [(self.bos_token, self.bos_token_id)] if self.add_bos_token and add_special_tokens else []
        if suffix_first:
            pair += [self.prefix_token, self.suffix_token, '$B', self.middle_token, '$A']
            special_tokens += [(self.prefix_token, self.prefix_id), (self.suffix_token, self.suffix_id), (self.middle_token, self.middle_id)]
        else:
            pair += [self.prefix_token, '$A', self.suffix_token, '$B', self.middle_token]
            special_tokens += [(self.prefix_token, self.prefix_id), (self.suffix_token, self.suffix_id), (self.middle_token, self.middle_id)]
        if self.add_eos_token and add_special_tokens:
            pair += [self.eos_token]
            special_tokens += [(self.eos_token, self.eos_token_id)]
        self._tokenizer.post_processor = processors.TemplateProcessing(single='$A', pair=pair, special_tokens=special_tokens)

    def encode_plus(self, text, text_pair=None, suffix_first=False, add_special_tokens=True, **kwargs):
        text_pair = kwargs.pop('suffix', text_pair)
        if self.fill_token is not None and self.fill_token in text and (text_pair is None):
            (text, text_pair) = text.split(self.fill_token)
        if text_pair is None or len(text_pair) < 1:
            return super().encode_plus(text, text_pair, add_special_tokens=add_special_tokens, **kwargs)
        if None in (self.prefix_id, self.middle_id, self.suffix_id):
            raise ValueError(f'When the input includes a `prefix` and a `suffix` used for the infilling task, the `prefix_id, middle_id, suffix_id` must all be initialized. Current values: {(self.prefix_id, self.middle_id, self.suffix_id)}')
        self.set_infilling_processor(False, suffix_first=suffix_first, add_special_tokens=add_special_tokens)
        tokens = super().encode_plus(' ' + text, text_pair=text_pair, add_special_tokens=True, **kwargs)
        self.set_infilling_processor(True)
        return tokens

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        return [self.bos_token_id] + token_ids_0 + token_ids_1 + [self.eos_token_id]

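# --- Illustrative sketch (not part of the original file) ---------------------------------------
# The fast tokenizer supports the same infilling flow; a prefix/suffix pair can also be passed
# explicitly instead of embedding "<FILL_ME>". The checkpoint name is an assumption.
def _example_code_llama_fast_infilling():
    from transformers import CodeLlamaTokenizerFast

    tokenizer = CodeLlamaTokenizerFast.from_pretrained("codellama/CodeLlama-7b-hf")  # assumed checkpoint
    prefix = "def add(a, b):\n    "
    suffix = "\n    return result"
    # `encode_plus` pops the `suffix` kwarg and builds <PRE> prefix <SUF> suffix <MID> ids.
    enc = tokenizer.encode_plus(prefix, suffix=suffix)
    print(tokenizer.convert_ids_to_tokens(enc["input_ids"])[:3])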
# File: transformers-main/src/transformers/models/codegen/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_codegen': ['CodeGenConfig', 'CodeGenOnnxConfig'], 'tokenization_codegen': ['CodeGenTokenizer']}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_codegen_fast'] = ['CodeGenTokenizerFast']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_codegen'] = ['CodeGenForCausalLM', 'CodeGenModel', 'CodeGenPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_codegen import CodeGenConfig, CodeGenOnnxConfig
    from .tokenization_codegen import CodeGenTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_codegen_fast import CodeGenTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_codegen import CodeGenForCausalLM, CodeGenModel, CodeGenPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/codegen/configuration_codegen.py
""""""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

class CodeGenConfig(PretrainedConfig):
    model_type = 'codegen'
    attribute_map = {'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}

    def __init__(self, vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function='gelu_new', resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-05, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)

class CodeGenOnnxConfig(OnnxConfigWithPast):

    def __init__(self, config: PretrainedConfig, task: str='default', patching_specs: List[PatchingSpec]=None, use_past: bool=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
                (batch, seqlen) = common_inputs['input_ids'].shape
                past_key_values_length = seqlen + 2
                past_shape = (batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads)
                ordered_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat([ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13

# File: transformers-main/src/transformers/models/codegen/modeling_codegen.py
""""""
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_codegen import CodeGenConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = 'Salesforce/codegen-2B-mono'
_CONFIG_FOR_DOC = 'CodeGenConfig'

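# Builds the additive 4D causal mask used by the eager/SDPA attention paths: positions that must
# not be attended to are filled with the most negative representable value (`min_dtype`) so they
# vanish after softmax, and a 2D padding mask, if given, is merged into the same tensor.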
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int):
    if attention_mask is not None and attention_mask.dim() == 4:
        causal_mask = attention_mask
    else:
        causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
        if sequence_length != 1:
            causal_mask = torch.triu(causal_mask, diagonal=1)
        causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
        causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
        if attention_mask is not None:
            causal_mask = causal_mask.clone()
            mask_length = attention_mask.shape[-1]
            padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
            padding_mask = padding_mask == 0
            causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
    return causal_mask

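# The three helpers below implement GPT-J style rotary position embeddings (RoPE):
# `create_sinusoidal_positions` precomputes the [num_pos, dim] sin/cos table,
# `rotate_every_two` interleaves paired feature channels as (-x2, x1), and
# `apply_rotary_pos_emb` combines them as x * cos + rotate_every_two(x) * sin.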
def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
    inv_freq = 1.0 / 10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim)
    sinusoid_inp = torch.einsum('i , j -> i j', torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
    return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)

def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
    x1 = x[:, :, :, ::2]
    x2 = x[:, :, :, 1::2]
    x = torch.stack((-x2, x1), dim=-1)
    return x.flatten(-2)

def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
    sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
    cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
    return tensor * cos + rotate_every_two(tensor) * sin

class CodeGenAttention(nn.Module):

    def __init__(self, config, layer_idx=None):
        super().__init__()
        max_positions = config.max_position_embeddings
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
        self.embed_dim = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_attention_heads
        if self.head_dim * self.num_attention_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and `num_attention_heads`: {self.num_attention_heads}).')
        self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
        self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.rotary_dim = config.rotary_dim
        pos_embd_dim = self.rotary_dim or self.embed_dim
        self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)

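    # The fused qkv projection output is stored in `mp_num = 4` interleaved shards (a layout
    # inherited from the original model-parallel checkpoints); `_split_heads` splits each shard
    # into heads and then flattens the shard dimension back out.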
    def _split_heads(self, x, n_head, dim_head, mp_num):
        reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
        reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
        return reshaped

    def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
        if len(tensor.shape) == 5:
            tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
        elif len(tensor.shape) == 4:
            tensor = tensor.permute(0, 2, 1, 3).contiguous()
        else:
            raise ValueError(f'Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}')
        new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
        return tensor.view(new_shape)

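    # Query and key are upcast to float32 before the matmul for numerical stability; the additive
    # causal mask is applied, logits are divided by sqrt(head_dim) (`scale_attn`), and the softmax
    # output is cast back to the value dtype before the weighted sum over values.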
    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        query = query.to(torch.float32)
        key = key.to(torch.float32)
        attn_weights = torch.matmul(query, key.transpose(-1, -2))
        if attention_mask is not None:
            causal_mask = attention_mask[:, :, :, :key.shape[-2]]
            attn_weights += causal_mask
        attn_weights = attn_weights / self.scale_attn
        attn_weights = nn.Softmax(dim=-1)(attn_weights)
        attn_weights = attn_weights.to(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)
        if head_mask is not None:
            attn_weights = attn_weights * head_mask
        attn_output = torch.matmul(attn_weights, value)
        return (attn_output, attn_weights)

    def forward(self, hidden_states: Optional[torch.FloatTensor], layer_past: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.Tensor, Tuple[torch.Tensor]], Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]]]:
        qkv = self.qkv_proj(hidden_states)
        mp_num = 4
        qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
        local_dim = self.head_dim * self.num_attention_heads // mp_num
        (query, value, key) = torch.split(qkv_split, local_dim, dim=-1)
        query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        value = value.permute(0, 2, 1, 3)
        embed_positions = self.embed_positions
        if embed_positions.device != position_ids.device:
            embed_positions = embed_positions.to(position_ids.device)
            self.embed_positions = embed_positions
        sincos = embed_positions[position_ids]
        (sin, cos) = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
        if self.rotary_dim is not None:
            k_rot = key[:, :, :, :self.rotary_dim]
            k_pass = key[:, :, :, self.rotary_dim:]
            q_rot = query[:, :, :, :self.rotary_dim]
            q_pass = query[:, :, :, self.rotary_dim:]
            k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
            q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
            key = torch.cat([k_rot, k_pass], dim=-1)
            query = torch.cat([q_rot, q_pass], dim=-1)
        else:
            key = apply_rotary_pos_emb(key, sin, cos)
            query = apply_rotary_pos_emb(query, sin, cos)
        key = key.permute(0, 2, 1, 3)
        query = query.permute(0, 2, 1, 3)
        if layer_past is not None:
            cache_kwargs = {'sin': sin, 'cos': cos, 'partial_rotation_size': self.rotary_dim, 'cache_position': cache_position}
            (key, value) = layer_past.update(key.to(hidden_states.dtype), value, self.layer_idx, cache_kwargs)
        (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask)
        attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
        attn_output = self.out_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)
        outputs = (attn_output, layer_past)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs

class CodeGenMLP(nn.Module):

    def __init__(self, intermediate_size, config):
        super().__init__()
        embed_dim = config.n_embd
        self.fc_in = nn.Linear(embed_dim, intermediate_size)
        self.fc_out = nn.Linear(intermediate_size, embed_dim)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
        hidden_states = self.fc_in(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc_out(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states

class CodeGenBlock(nn.Module):

    def __init__(self, config, layer_idx=None):
        super().__init__()
        inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = CodeGenAttention(config, layer_idx)
        self.mlp = CodeGenMLP(inner_dim, config)

    def forward(self, hidden_states: Optional[torch.FloatTensor], layer_past: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(hidden_states=hidden_states, layer_past=layer_past, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
        attn_output = attn_outputs[0]
        outputs = attn_outputs[1:]
        feed_forward_hidden_states = self.mlp(hidden_states)
        hidden_states = attn_output + feed_forward_hidden_states + residual
        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]
        return outputs

class CodeGenPreTrainedModel(PreTrainedModel):
    config_class = CodeGenConfig
    base_model_prefix = 'transformer'
    supports_gradient_checkpointing = True
    _no_split_modules = ['CodeGenBlock']
    _skip_keys_device_placement = 'past_key_values'
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear,)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
CODEGEN_START_DOCSTRING = '\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`CodeGenConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
CODEGEN_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`torch.LongTensor` of shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoProcenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n            1]`:\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.n_positions - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n            Two formats are allowed:\n            - a [`~cache_utils.Cache`] instance, see our\n            [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n            cache format.\n\n            The model will output the same cache format that is fed as input. 
If no `past_key_values` are passed, the\n            legacy cache format will be returned.\n\n            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n            of shape `(batch_size, sequence_length)`.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer\n            the complete sequence length.\n"

@add_start_docstrings('The bare CodeGen Model transformer outputting raw hidden-states without any specific head on top.', CODEGEN_START_DOCSTRING)
class CodeGenModel(CodeGenPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.embed_dim = config.n_embd
        self.vocab_size = config.vocab_size
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([CodeGenBlock(config, layer_idx=i) for i in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one')
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
                use_cache = False
        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        use_legacy_cache = False
        if use_cache and (not isinstance(past_key_values, Cache)):
            use_legacy_cache = True
            past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            if not self.training:
                logger.warning_once('We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.45. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/internal/generation_utils#transformers.Cache)')
        seq_length = inputs_embeds.shape[1]
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=inputs_embeds.device)
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions)
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
        hidden_states = inputs_embeds
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, seq_length)
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds
        hidden_states = self.drop(hidden_states)
        output_shape = (-1, seq_length, hidden_states.size(-1))
        next_decoder_cache = None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for (i, block) in enumerate(self.h):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, None, causal_mask, position_ids, head_mask[i], use_cache, output_attentions, cache_position)
            else:
                outputs = block(hidden_states=hidden_states, layer_past=past_key_values, attention_mask=causal_mask, position_ids=position_ids, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
            hidden_states = outputs[0]
            if use_cache is True:
                next_decoder_cache = outputs[1]
            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(output_shape)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        next_cache = None
        if use_cache:
            next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
        if not return_dict:
            return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions] if v is not None))
        return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attentions)

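    # Chooses the attention-mask representation per backend: flash-attention-2 consumes the raw
    # 2D padding mask (or None), SDPA may skip the mask entirely for purely causal inputs, and the
    # remaining paths get the fully materialized additive 4D mask built by the helper above.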
    def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool):
        if self.config._attn_implementation == 'flash_attention_2':
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)
        if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions):
            if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
                return None
        (dtype, device) = (input_tensor.dtype, input_tensor.device)
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_length()
        else:
            target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
        causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
        if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions):
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
        return causal_mask

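# --- Illustrative sketch (not part of the original file) ---------------------------------------
# Minimal causal generation with the model defined below; the smaller checkpoint name is an
# assumption chosen to keep the example lightweight ("Salesforce/codegen-2B-mono" is the
# checkpoint used in the docstrings above).
def _example_codegen_generation():
    import torch
    from transformers import AutoTokenizer, CodeGenForCausalLM

    checkpoint = "Salesforce/codegen-350M-mono"  # assumed smaller checkpoint
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = CodeGenForCausalLM.from_pretrained(checkpoint)
    inputs = tokenizer("def hello_world():", return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=32, pad_token_id=tokenizer.eos_token_id)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))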
@add_start_docstrings('\n    The CodeGen Model transformer with a language modeling head on top.\n    ', CODEGEN_START_DOCSTRING)
class CodeGenForCausalLM(CodeGenPreTrainedModel):
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config):
        super().__init__(config)
        self.transformer = CodeGenModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, past_key_values=None, inputs_embeds=None, cache_position=None, use_cache=True, **kwargs):
        if past_key_values is not None:
            if inputs_embeds is not None:
                input_ids = input_ids[:, -cache_position.shape[0]:]
            elif input_ids.shape[1] != cache_position.shape[0]:
                input_ids = input_ids[:, cache_position]
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -input_ids.shape[1]:]
        if attention_mask is not None and position_ids is None:
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1]:]
                position_ids = position_ids.clone(memory_format=torch.contiguous_format)
        if inputs_embeds is not None and cache_position[0] == 0:
            model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None}
        else:
            model_inputs = {'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None}
        if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
            if model_inputs['inputs_embeds'] is not None:
                (batch_size, sequence_length, _) = model_inputs['inputs_embeds'].shape
                device = model_inputs['inputs_embeds'].device
            else:
                (batch_size, sequence_length) = model_inputs['input_ids'].shape
                device = model_inputs['input_ids'].device
            dtype = self.lm_head.weight.dtype
            min_dtype = torch.finfo(dtype).min
            attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_length(), dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=batch_size)
        model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask})
        return model_inputs

    @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states).to(torch.float32)
        loss = None
        if labels is not None:
            labels = labels.to(lm_logits.device)
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
            loss = loss.to(hidden_states.dtype)
        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)

    @staticmethod
    def _reorder_cache(past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
        return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past_key_values))
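
# Hedged usage sketch (editor's addition, not part of the original module): greedy generation
# with CodeGenForCausalLM. The checkpoint name below is an assumption about what is available;
# any CodeGen checkpoint with a matching tokenizer should behave the same way.
def _demo_codegen_generation():  # hypothetical helper, safe to remove
    from transformers import AutoTokenizer

    checkpoint = "Salesforce/codegen-350M-mono"  # assumed checkpoint
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = CodeGenForCausalLM.from_pretrained(checkpoint)
    inputs = tokenizer("def hello_world():", return_tensors="pt")
    # Greedy decoding for a handful of new tokens; no sampling.
    output_ids = model.generate(**inputs, max_new_tokens=16, do_sample=False)
    return tokenizer.decode(output_ids[0])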

# File: transformers-main/src/transformers/models/codegen/tokenization_codegen.py
""""""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
import regex as re
from ...utils import is_tf_available, is_torch_available, logging, to_py_obj
if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

@lru_cache()
def bytes_to_unicode():
    bs = list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    cs = bs[:]
    n = 0
    for b in range(2 ** 8):
        if b not in bs:
            bs.append(b)
            cs.append(2 ** 8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
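
# Hedged illustration (editor's addition): bytes_to_unicode() builds a reversible byte->unicode
# table so that every byte maps to a printable character. Bytes that are already printable map
# to themselves; the rest are shifted into the 256+ range, which is why the space byte shows up
# as the GPT-2 style marker 'Ġ' in BPE vocabularies.
def _demo_bytes_to_unicode():  # hypothetical helper, safe to remove
    table = bytes_to_unicode()
    assert table[ord("A")] == "A"            # printable bytes are kept as-is
    assert table[ord(" ")] == chr(256 + 32)  # space becomes 'Ġ'
    assert len(table) == 256                 # every byte value is covered
    return table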

def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
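
# Hedged illustration (editor's addition): get_pairs() returns the set of adjacent symbol pairs
# that the BPE loop below ranks and merges.
def _demo_get_pairs():  # hypothetical helper, safe to remove
    pairs = get_pairs(tuple("hello"))
    assert pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
    return pairs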

class CodeGenTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, errors='replace', unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', pad_token=None, add_prefix_space=False, add_bos_token=False, return_token_type_ids=False, **kwargs):
        bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
        self.add_bos_token = add_bos_token
        self.return_token_type_ids = return_token_type_ids
        if self.return_token_type_ids:
            self.model_input_names.append('token_type_ids')
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.errors = errors
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
        super().__init__(errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, add_bos_token=add_bos_token, return_token_type_ids=return_token_type_ids, **kwargs)

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if self.add_bos_token:
            bos_token_ids = [self.bos_token_id]
        else:
            bos_token_ids = []
        output = bos_token_ids + token_ids_0
        if token_ids_1 is None:
            return output
        return output + bos_token_ids + token_ids_1

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        sep = [self.sep_token_id] if self.sep_token_id is not None else []
        cls = [self.cls_token_id] if self.cls_token_id is not None else []
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return (vocab_file, merge_file)

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if is_split_into_words or add_prefix_space:
            text = ' ' + text
        return (text, kwargs)

    def decode(self, token_ids: Union[int, List[int], 'np.ndarray', 'torch.Tensor', 'tf.Tensor'], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=None, truncate_before_pattern: Optional[List[str]]=None, **kwargs) -> str:
        token_ids = to_py_obj(token_ids)
        decoded_text = super()._decode(token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1
        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer('^print', completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[:prints[1].start()]
        defs = list(re.finditer('^def', completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[:defs[1].start()]
        start_pos = 0
        terminals_pos = [pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1]
        if len(terminals_pos) > 0:
            return completion[:min(terminals_pos)]
        else:
            return completion
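
# Hedged usage sketch (editor's addition): `decode(..., truncate_before_pattern=...)` cuts the
# decoded completion at the first match of any of the given regexes (and at a second top-level
# `print`/`def`). The checkpoint name is an assumption; any CodeGen tokenizer files would do.
def _demo_truncated_decode():  # hypothetical helper, safe to remove
    tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    text = "def add(a, b):\n    return a + b\n# --- tests ---\nassert add(1, 2) == 3\n"
    ids = tokenizer(text).input_ids
    # Everything from the first line starting with '#' onwards is dropped, leaving
    # roughly "def add(a, b):\n    return a + b\n".
    return tokenizer.decode(ids, truncate_before_pattern=["^#"])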

# File: transformers-main/src/transformers/models/codegen/tokenization_codegen_fast.py
""""""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, return_token_type_ids=False, **kwargs):
        self.return_token_type_ids = return_token_type_ids
        if self.return_token_type_ids:
            self.model_input_names.append('token_type_ids')
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, return_token_type_ids=return_token_type_ids, **kwargs)
        if kwargs.pop('add_bos_token', False):
            model_id = kwargs.pop('name_or_path', '')
            raise ValueError(f"Currenty GPT2's fast tokenizer does NOT support adding a BOS token. Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\nThis issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005. so that the fast tokenizer works correctly.")
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._encode_plus(*args, **kwargs)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        sep = [self.sep_token_id] if self.sep_token_id is not None else []
        cls = [self.cls_token_id] if self.cls_token_id is not None else []
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(self, token_ids: Union[int, List[int], 'np.ndarray', 'torch.Tensor', 'tf.Tensor'], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=None, truncate_before_pattern: Optional[List[str]]=None, **kwargs) -> str:
        decoded_text = super().decode(token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1
        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer('^print', completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[:prints[1].start()]
        defs = list(re.finditer('^def', completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[:defs[1].start()]
        start_pos = 0
        terminals_pos = [pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1]
        if len(terminals_pos) > 0:
            return completion[:min(terminals_pos)]
        else:
            return completion

# File: transformers-main/src/transformers/models/cohere/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_cohere': ['CohereConfig']}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_cohere_fast'] = ['CohereTokenizerFast']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_cohere'] = ['CohereForCausalLM', 'CohereModel', 'CoherePreTrainedModel']
if TYPE_CHECKING:
    from .configuration_cohere import CohereConfig
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_cohere_fast import CohereTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cohere import CohereForCausalLM, CohereModel, CoherePreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/cohere/configuration_cohere.py
""""""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class CohereConfig(PretrainedConfig):
    model_type = 'cohere'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=256000, hidden_size=8192, intermediate_size=22528, logit_scale=0.0625, num_hidden_layers=40, num_attention_heads=64, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=8192, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, pad_token_id=0, bos_token_id=5, eos_token_id=255001, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, use_qk_norm=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.logit_scale = logit_scale
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.use_qk_norm = use_qk_norm
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
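
# Hedged usage sketch (editor's addition): a deliberately tiny CohereConfig for smoke tests.
# Every keyword below comes from the __init__ signature above; the small values are arbitrary.
def _demo_tiny_cohere_config():  # hypothetical helper, safe to remove
    return CohereConfig(
        vocab_size=1024,
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,  # grouped-query attention: 2 kv heads shared by 4 query heads
        max_position_embeddings=128,
    )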

# File: transformers-main/src/transformers/models/cohere/modeling_cohere.py
""""""
import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import ALL_LAYERNORM_LAYERS
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, is_torchdynamo_compiling, logging, replace_return_docstrings
from .configuration_cohere import CohereConfig
if is_flash_attn_2_available():
    from ...modeling_flash_attention_utils import _flash_attention_forward
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'CohereConfig'

def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int):
    if attention_mask is not None and attention_mask.dim() == 4:
        causal_mask = attention_mask
    else:
        causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
        if sequence_length != 1:
            causal_mask = torch.triu(causal_mask, diagonal=1)
        causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
        causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
        if attention_mask is not None:
            causal_mask = causal_mask.clone()
            mask_length = attention_mask.shape[-1]
            padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
            padding_mask = padding_mask == 0
            causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
    return causal_mask
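
# Hedged illustration (editor's addition): materializing the 4D additive causal mask for a
# decoding step with 2 cached tokens and 3 new query tokens (total key/value length 5).
def _demo_causal_mask():  # hypothetical helper, safe to remove
    attention_mask = torch.ones(2, 5, dtype=torch.long)  # 2D padding mask, nothing padded
    cache_position = torch.arange(2, 5)                  # absolute positions of the new tokens
    mask = _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask,
        sequence_length=3,
        target_length=5,
        dtype=torch.float32,
        device=torch.device("cpu"),
        min_dtype=torch.finfo(torch.float32).min,
        cache_position=cache_position,
        batch_size=2,
    )
    return mask.shape  # torch.Size([2, 1, 3, 5]): (batch, 1, query_length, kv_length)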

class CohereLayerNorm(nn.Module):

    def __init__(self, hidden_size=None, eps=1e-05, bias=False):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
        hidden_states = self.weight.to(torch.float32) * hidden_states
        return hidden_states.to(input_dtype)
ALL_LAYERNORM_LAYERS.append(CohereLayerNorm)
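
# Hedged illustration (editor's addition): CohereLayerNorm is a bias-free LayerNorm (it centers
# and rescales over the last dimension); a tuple `hidden_size` such as (num_heads, head_dim) is
# used further below for the optional per-head query/key normalization.
def _demo_cohere_layer_norm():  # hypothetical helper, safe to remove
    norm = CohereLayerNorm(hidden_size=64)  # weight is initialized to ones
    x = torch.randn(2, 5, 64)
    y = norm(x)
    # With unit weights the output is (numerically) zero-mean along the last dimension.
    return y.mean(dim=-1).abs().max()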

class CohereRotaryEmbedding(nn.Module):

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        super().__init__()
        self.scaling_factor = scaling_factor
        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)
        self.register_buffer('inv_freq', inv_freq, persistent=False)

    @torch.no_grad()
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu'
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.repeat_interleave(freqs, 2, dim=-1)
            cos = emb.cos()
            sin = emb.sin()
        return (cos, sin)

def rotate_half(x):
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
    return rot_x
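
# Hedged illustration (editor's addition): this rotate_half interleaves even/odd channels,
# turning each pair (x0, x1) into (-x1, x0), rather than splitting the vector into two halves
# as some other rotary implementations in the library do.
def _demo_rotate_half():  # hypothetical helper, safe to remove
    x = torch.tensor([1.0, 2.0, 3.0, 4.0])
    return rotate_half(x)  # tensor([-2., 1., -4., 3.])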

def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    dtype = q.dtype
    q = q.float()
    k = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = q * cos + rotate_half(q) * sin
    k_embed = k * cos + rotate_half(k) * sin
    return (q_embed.to(dtype=dtype), k_embed.to(dtype=dtype))

class CohereMLP(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    (batch, num_key_value_heads, slen, head_dim) = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
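
# Hedged illustration (editor's addition): repeat_kv expands grouped key/value heads so each
# query head has a matching key/value head (here 2 kv heads repeated 4 times -> 8 heads).
def _demo_repeat_kv():  # hypothetical helper, safe to remove
    kv = torch.randn(1, 2, 5, 16)  # (batch, num_key_value_heads, seq_len, head_dim)
    return repeat_kv(kv, n_rep=4).shape  # torch.Size([1, 8, 5, 16])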

class CohereAttention(nn.Module):

    def __init__(self, config: CohereConfig, layer_idx: Optional[int]=None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True
        self.use_qk_norm = config.use_qk_norm
        if self.head_dim * self.num_heads != self.hidden_size:
            raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).')
        if self.use_qk_norm:
            self.q_norm = CohereLayerNorm(hidden_size=(self.num_heads, self.head_dim), eps=config.layer_norm_eps)
            self.k_norm = CohereLayerNorm(hidden_size=(self.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps)
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
        self._init_rope()

    def _init_rope(self):
        self.rotary_emb = CohereRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        (bsz, q_len, _) = hidden_states.size()
        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim)
        if self.use_qk_norm:
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        (cos, sin) = self.rotary_emb(value_states, position_ids)
        (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_value is not None:
            cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
            (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
        if attention_mask is not None:
            causal_mask = attention_mask[:, :, :, :key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)
        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}')
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)
        if not output_attentions:
            attn_weights = None
        return (attn_output, attn_weights, past_key_value)

class CohereFlashAttention2(CohereAttention):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if isinstance(past_key_value, StaticCache):
            raise ValueError('`static` cache implementation is not compatible with `attn_implementation==flash_attention_2`; make sure to use `sdpa` in the meantime, and open an issue at https://github.com/huggingface/transformers')
        output_attentions = False
        (bsz, q_len, _) = hidden_states.size()
        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim)
        if self.use_qk_norm:
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        (cos, sin) = self.rotary_emb(value_states, position_ids)
        (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_value is not None:
            cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
            (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)
        dropout_rate = self.attention_dropout if self.training else 0.0
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            elif hasattr(self.config, '_pre_quantization_dtype'):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype
            logger.warning_once(f'The input hidden states seem to have been silently cast to float32; this is probably because you have upcasted embedding or layer norm layers to float32. We will cast the input back to {target_dtype}.')
            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)
        attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal)
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)
        if not output_attentions:
            attn_weights = None
        return (attn_output, attn_weights, past_key_value)

class CohereSdpaAttention(CohereAttention):

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            logger.warning_once('CohereModel is using CohereSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.')
            return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
        (bsz, q_len, _) = hidden_states.size()
        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim)
        if self.use_qk_norm:
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        (cos, sin) = self.rotary_emb(value_states, position_ids)
        (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_value is not None:
            cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
            (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        causal_mask = attention_mask
        if attention_mask is not None:
            causal_mask = causal_mask[:, :, :, :key_states.shape[-2]]
        if query_states.device.type == 'cuda' and causal_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()
        is_causal = True if causal_mask is None and q_len > 1 else False
        attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal)
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)
        return (attn_output, None, past_key_value)
COHERE_ATTENTION_CLASSES = {'eager': CohereAttention, 'flash_attention_2': CohereFlashAttention2, 'sdpa': CohereSdpaAttention}

class CohereDecoderLayer(nn.Module):

    def __init__(self, config: CohereConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = COHERE_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
        self.mlp = CohereMLP(config)
        self.input_layernorm = CohereLayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        (hidden_states_attention, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
        hidden_states_mlp = self.mlp(hidden_states)
        hidden_states = residual + hidden_states_attention + hidden_states_mlp
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        if use_cache:
            outputs += (present_key_value,)
        return outputs
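
# Hedged illustration (editor's addition): CohereDecoderLayer is a "parallel" block: attention
# and the MLP both read the same layer-normalized input and are summed with the residual,
# instead of being chained. The tiny config below is arbitrary; no attention mask is passed,
# so this only checks shapes (CohereModel normally supplies a causal mask).
def _demo_cohere_decoder_layer():  # hypothetical helper, safe to remove
    config = CohereConfig(
        vocab_size=128,
        hidden_size=32,
        intermediate_size=64,
        num_hidden_layers=1,
        num_attention_heads=4,
        max_position_embeddings=64,
    )
    layer = CohereDecoderLayer(config, layer_idx=0)
    hidden_states = torch.randn(1, 6, config.hidden_size)
    position_ids = torch.arange(6).unsqueeze(0)
    (output,) = layer(hidden_states, position_ids=position_ids)
    return output.shape  # torch.Size([1, 6, 32])
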
COHERE_START_DOCSTRING = '\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`CohereConfig`]):\n            Model configuration class with all the parameters of the model. Initializing with a config file does not\n            load the weights associated with the model, only the configuration. Check out the\n            [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'

@add_start_docstrings('The bare Cohere Model outputting raw hidden-states without any specific head on top.', COHERE_START_DOCSTRING)
class CoherePreTrainedModel(PreTrainedModel):
    config_class = CohereConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['CohereDecoderLayer']
    _skip_keys_device_placement = ['past_key_values']
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
COHERE_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n            it.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n            `past_key_values`).\n\n            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n            information on the default strategy.\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.n_positions - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n            Two formats are allowed:\n            - a [`~cache_utils.Cache`] instance, see our\n            [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n            cache format.\n\n            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n            legacy cache format will be returned.\n\n            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n            of shape `(batch_size, sequence_length)`.\n        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        use_cache (`bool`, *optional*):\n            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n            `past_key_values`).\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"

@add_start_docstrings('The bare Cohere Model outputting raw hidden-states without any specific head on top.', COHERE_START_DOCSTRING)
class CohereModel(CoherePreTrainedModel):

    def __init__(self, config: CohereConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = CohereLayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(COHERE_INPUTS_DOCSTRING)
    def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one')
        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.')
            use_cache = False
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        past_seen_tokens = 0
        return_legacy_cache = False
        if use_cache and (not isinstance(past_key_values, Cache)) and (not self.training):
            return_legacy_cache = True
            past_key_values = DynamicCache.from_legacy_cache(past_key_values)
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions)
        hidden_states = inputs_embeds
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None
        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position)
            else:
                layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
        hidden_states = self.norm(hidden_states)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        next_cache = next_decoder_cache if use_cache else None
        if return_legacy_cache:
            next_cache = next_cache.to_legacy_cache()
        if not return_dict:
            return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None))
        return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns)

    def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool):
        if self.config._attn_implementation == 'flash_attention_2':
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)
        if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions):
            if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
                return None
        (dtype, device) = (input_tensor.dtype, input_tensor.device)
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_length()
        else:
            target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
        causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
        if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions):
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
        return causal_mask

class CohereForCausalLM(CoherePreTrainedModel):
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config):
        super().__init__(config)
        self.model = CohereModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.logit_scale = config.logit_scale
        self.tie_word_embeddings = config.tie_word_embeddings
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(COHERE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, num_logits_to_keep: int=0) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
        hidden_states = outputs[0]
        if labels is None and (not is_torchdynamo_compiling()):
            logger.warning_once('Starting from v4.46, the `logits` model output will have the same type as the model (except at train time, where it will always be FP32)')
        logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :]).float()
        logits = logits * self.logit_scale
        loss = None
        if labels is not None:
            logits = logits.float()
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, num_logits_to_keep=None, **kwargs):
        if past_key_values is not None:
            if inputs_embeds is not None:
                input_ids = input_ids[:, -cache_position.shape[0]:]
            elif input_ids.shape[1] != cache_position.shape[0]:
                input_ids = input_ids[:, cache_position]
        if attention_mask is not None and position_ids is None:
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1]:]
                position_ids = position_ids.clone(memory_format=torch.contiguous_format)
        if inputs_embeds is not None and cache_position[0] == 0:
            model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None}
        else:
            model_inputs = {'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None}
        if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
            if model_inputs['inputs_embeds'] is not None:
                (batch_size, sequence_length, _) = model_inputs['inputs_embeds'].shape
                device = model_inputs['inputs_embeds'].device
            else:
                (batch_size, sequence_length) = model_inputs['input_ids'].shape
                device = model_inputs['input_ids'].device
            dtype = self.lm_head.weight.dtype
            min_dtype = torch.finfo(dtype).min
            attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_length(), dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=batch_size)
        if num_logits_to_keep is not None:
            model_inputs['num_logits_to_keep'] = num_logits_to_keep
        model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask})
        return model_inputs
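    # Illustrative usage sketch (editorial; the checkpoint id below is an example and is not taken from
    # this file):
    #
    #   from transformers import AutoTokenizer, AutoModelForCausalLM
    #   tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
    #   model = AutoModelForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
    #   inputs = tokenizer("Hello, world!", return_tensors="pt")
    #   generated = model.generate(**inputs, max_new_tokens=16)
    #   print(tokenizer.decode(generated[0], skip_special_tokens=True))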

# File: transformers-main/src/transformers/models/cohere/tokenization_cohere_fast.py
import pickle
from typing import Dict, List, Literal, Union
from tokenizers import processors
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from ...utils.versions import require_version
require_version('tokenizers>=0.13.3')
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {'tokenizer_file': {'Cohere/Command-nightly': 'https://huggingface.co/Cohere/Command-nightly/blob/main/tokenizer.json'}}
DEFAULT_SYSTEM_PROMPT = 'You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere.'
DEFAULT_RAG_PREAMBLE = "## Task and Context\nYou help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.\n\n## Style Guide\nUnless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling."

class CohereTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    padding_side = 'left'
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token='<UNK>', bos_token='<BOS_TOKEN>', eos_token='<|END_OF_TURN_TOKEN|>', add_bos_token=True, add_eos_token=False, use_default_system_prompt=False, add_prefix_space=False, **kwargs):
        super().__init__(vocab_file=vocab_file, merges_file=merges_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, use_default_system_prompt=use_default_system_prompt, add_prefix_space=add_prefix_space, **kwargs)
        self._add_bos_token = add_bos_token
        self._add_eos_token = add_eos_token
        self.update_post_processor()
        self.use_default_system_prompt = use_default_system_prompt
        self.vocab_file = vocab_file
        self.grounded_generation_template = kwargs.pop('grounded_generation_template', None)
        self.tool_use_template = kwargs.pop('tool_use_template', None)
        pre_tok_state = pickle.dumps(self.backend_tokenizer.pre_tokenizer)
        decoder_state = pickle.dumps(self.backend_tokenizer.decoder)
        if add_prefix_space:
            pre_tok_state = pre_tok_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true')
            decoder_state = decoder_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true')
        self.backend_tokenizer.pre_tokenizer = pickle.loads(pre_tok_state)
        self.backend_tokenizer.decoder = pickle.loads(decoder_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise Exception(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise Exception(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)

    def update_post_processor(self):
        bos = self.bos_token
        bos_token_id = self.bos_token_id
        if bos is None and self.add_bos_token:
            raise ValueError('add_bos_token = True but bos_token = None')
        eos = self.eos_token
        eos_token_id = self.eos_token_id
        if eos is None and self.add_eos_token:
            raise ValueError('add_eos_token = True but eos_token = None')
        single = f"{(bos + ':0 ' if self.add_bos_token else '')}$A:0{(' ' + eos + ':0' if self.add_eos_token else '')}"
        pair = f"{single}{(' ' + bos + ':1' if self.add_bos_token else '')} $B:1{(' ' + eos + ':1' if self.add_eos_token else '')}"
        special_tokens = []
        if self.add_bos_token:
            special_tokens.append((bos, bos_token_id))
        if self.add_eos_token:
            special_tokens.append((eos, eos_token_id))
        self._tokenizer.post_processor = processors.TemplateProcessing(single=single, pair=pair, special_tokens=special_tokens)

    @property
    def add_eos_token(self):
        return self._add_eos_token

    @property
    def add_bos_token(self):
        return self._add_bos_token

    @add_eos_token.setter
    def add_eos_token(self, value):
        self._add_eos_token = value
        self.update_post_processor()

    @add_bos_token.setter
    def add_bos_token(self, value):
        self._add_bos_token = value
        self.update_post_processor()

    def apply_tool_use_template(self, conversation: Union[List[Dict[str, str]]], tools: List[Dict], **kwargs) -> Union[str, List[int]]:
        return self.apply_chat_template(conversation, chat_template='tool_use', tools=tools, **kwargs)

    def apply_grounded_generation_template(self, conversation: Union[List[Dict[str, str]]], documents: List[Dict], citation_mode: Literal['fast', 'accurate']='accurate', **kwargs) -> Union[str, List[int]]:
        return self.apply_chat_template(conversation, chat_template='rag', documents=documents, citation_mode=citation_mode, **kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
        output = bos_token_id + token_ids_0 + eos_token_id
        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id
        return output
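    # Editorial note (illustrative): with add_bos_token=True and add_eos_token=False (the defaults
    # above), build_inputs_with_special_tokens([t1, t2]) returns [bos_id, t1, t2], and a sequence pair
    # returns [bos_id, t1, t2, bos_id, u1, u2]; this mirrors the TemplateProcessing strings assembled in
    # update_post_processor().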

# File: transformers-main/src/transformers/models/conditional_detr/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_conditional_detr': ['ConditionalDetrConfig', 'ConditionalDetrOnnxConfig']}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_conditional_detr'] = ['ConditionalDetrForObjectDetection', 'ConditionalDetrForSegmentation', 'ConditionalDetrModel', 'ConditionalDetrPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_conditional_detr import ConditionalDetrConfig, ConditionalDetrOnnxConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
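# Editorial note: this __init__ follows the library's lazy-import pattern. The import structure is
# declared up front, optional-dependency blocks skip entries whose backends (vision, torch) are
# unavailable, and at runtime the module is swapped for a _LazyModule that resolves attributes on
# first access.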

# File: transformers-main/src/transformers/models/conditional_detr/configuration_conditional_detr.py
""""""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

class ConditionalDetrConfig(PretrainedConfig):
    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads'}

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type='sine', backbone='resnet50', use_pretrained_backbone=True, backbone_kwargs=None, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if use_timm_backbone and backbone_kwargs is None:
            backbone_kwargs = {}
            if dilation:
                backbone_kwargs['output_stride'] = 16
            backbone_kwargs['out_indices'] = [1, 2, 3, 4]
            backbone_kwargs['in_chans'] = num_channels
        elif not use_timm_backbone and backbone in (None, 'resnet50'):
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.backbone_kwargs = backbone_kwargs
        self.dilation = dilation
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
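    # Illustrative usage sketch (editorial; values shown are the defaults from __init__ above):
    #
    #   config = ConditionalDetrConfig(num_queries=300, d_model=256)
    #   assert config.hidden_size == config.d_model
    #   assert config.num_attention_heads == config.encoder_attention_heads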

class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-05

    @property
    def default_onnx_opset(self) -> int:
        return 12

# File: transformers-main/src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py
""""""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys = []
for i in range(6):
    rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias'))
rename_keys.extend([('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias')])

def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val

def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for (key, value) in state_dict.items():
        if 'backbone.0.body' in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict

def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ''
    if is_panoptic:
        prefix = 'conditional_detr.'
    for i in range(6):
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias')
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]

def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im

@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    config = ConditionalDetrConfig()
    if 'resnet101' in model_name:
        config.backbone = 'resnet101'
    if 'dc5' in model_name:
        config.dilation = True
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = 'huggingface/label-files'
        filename = 'coco-detection-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for (k, v) in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for (k, v) in id2label.items()}
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    image_processor = ConditionalDetrImageProcessor(format=format)
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    logger.info(f'Converting model {model_name}...')
    conditional_detr = torch.hub.load('DeppMeng/ConditionalDETR', model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    for (src, dest) in rename_keys:
        if is_panoptic:
            src = 'conditional_detr.' + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    prefix = 'conditional_detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if key.startswith('conditional_detr') and (not key.startswith('class_labels_classifier')) and (not key.startswith('bbox_predictor')):
                val = state_dict.pop(key)
                state_dict['conditional_detr.model' + key[4:]] = val
            elif 'class_labels_classifier' in key or 'bbox_predictor' in key:
                val = state_dict.pop(key)
                state_dict['conditional_detr.' + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        elif not key.startswith('class_labels_classifier') and (not key.startswith('bbox_predictor')):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization='DepuMeng', commit_message='Add model')
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=0.0001)
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=0.0001)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=0.0001)
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='conditional_detr_resnet50', type=str, help="Name of the Conditional DETR model you'd like to convert.")
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.')
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
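# Illustrative invocation (editorial; the output path is a placeholder):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50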

# File: transformers-main/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py
""""""
import warnings
from ...image_transforms import rgb_to_id as _rgb_to_id
from ...utils import logging
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
logger = logging.get_logger(__name__)

def rgb_to_id(x):
    warnings.warn('rgb_to_id has moved and will not be importable from this module from v5. Please import from transformers.image_transforms instead.', FutureWarning)
    return _rgb_to_id(x)

class ConditionalDetrFeatureExtractor(ConditionalDetrImageProcessor):

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn('The class ConditionalDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use ConditionalDetrImageProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs)

# File: transformers-main/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
""""""
import io
import pathlib
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_processing_utils import BaseImageProcessor, get_size_dict
from ...image_transforms import PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, pad, rescale, resize, rgb_to_id, to_channel_dimension_format
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, AnnotationType, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_annotations, validate_kwargs, validate_preprocess_arguments
from ...utils import TensorType, is_flax_available, is_jax_tensor, is_scipy_available, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_torch_available():
    import torch
    from torch import nn
if is_vision_available():
    import PIL
if is_scipy_available():
    import scipy.special
    import scipy.stats
logger = logging.get_logger(__name__)
SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)

def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
    (height, width) = image_size
    raw_size = None
    if max_size is not None:
        min_original_size = float(min((height, width)))
        max_original_size = float(max((height, width)))
        if max_original_size / min_original_size * size > max_size:
            raw_size = max_size * min_original_size / max_original_size
            size = int(round(raw_size))
    if (height <= width and height == size) or (width <= height and width == size):
        (oh, ow) = (height, width)
    elif width < height:
        ow = size
        if max_size is not None and raw_size is not None:
            oh = int(raw_size * height / width)
        else:
            oh = int(size * height / width)
    else:
        oh = size
        if max_size is not None and raw_size is not None:
            ow = int(raw_size * width / height)
        else:
            ow = int(size * width / height)
    return (oh, ow)
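# Worked example (editorial): for a 480x640 (height x width) image with size=800 and max_size=1333,
# 640 / 480 * 800 ≈ 1066.7 <= 1333, so `size` stays at 800; the shorter side becomes 800 and the
# longer side int(800 * 640 / 480) = 1066, giving (oh, ow) = (800, 1066).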

def get_resize_output_image_size(input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:
    image_size = get_image_size(input_image, input_data_format)
    if isinstance(size, (list, tuple)):
        return size
    return get_size_with_aspect_ratio(image_size, size, max_size)

def get_image_size_for_max_height_width(input_image: np.ndarray, max_height: int, max_width: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:
    image_size = get_image_size(input_image, input_data_format)
    (height, width) = image_size
    height_scale = max_height / height
    width_scale = max_width / width
    min_scale = min(height_scale, width_scale)
    new_height = int(height * min_scale)
    new_width = int(width * min_scale)
    return (new_height, new_width)

def get_numpy_to_framework_fn(arr) -> Callable:
    if isinstance(arr, np.ndarray):
        return np.array
    if is_tf_available() and is_tf_tensor(arr):
        import tensorflow as tf
        return tf.convert_to_tensor
    if is_torch_available() and is_torch_tensor(arr):
        import torch
        return torch.tensor
    if is_flax_available() and is_jax_tensor(arr):
        import jax.numpy as jnp
        return jnp.array
    raise ValueError(f'Cannot convert arrays of type {type(arr)}')

def safe_squeeze(arr: np.ndarray, axis: Optional[int]=None) -> np.ndarray:
    if axis is None:
        return arr.squeeze()
    try:
        return arr.squeeze(axis=axis)
    except ValueError:
        return arr

def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
    (image_height, image_width) = image_size
    norm_annotation = {}
    for (key, value) in annotation.items():
        if key == 'boxes':
            boxes = value
            boxes = corners_to_center_format(boxes)
            boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
            norm_annotation[key] = boxes
        else:
            norm_annotation[key] = value
    return norm_annotation

def max_across_indices(values: Iterable[Any]) -> List[Any]:
    return [max(values_i) for values_i in zip(*values)]

def get_max_height_width(images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> List[int]:
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(images[0])
    if input_data_format == ChannelDimension.FIRST:
        (_, max_height, max_width) = max_across_indices([img.shape for img in images])
    elif input_data_format == ChannelDimension.LAST:
        (max_height, max_width, _) = max_across_indices([img.shape for img in images])
    else:
        raise ValueError(f'Invalid channel dimension format: {input_data_format}')
    return (max_height, max_width)

def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
    (input_height, input_width) = get_image_size(image, channel_dim=input_data_format)
    mask = np.zeros(output_size, dtype=np.int64)
    mask[:input_height, :input_width] = 1
    return mask

def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
    try:
        from pycocotools import mask as coco_mask
    except ImportError:
        raise ImportError('Pycocotools is not installed in your environment.')
    masks = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        mask = coco_mask.decode(rles)
        if len(mask.shape) < 3:
            mask = mask[..., None]
        mask = np.asarray(mask, dtype=np.uint8)
        mask = np.any(mask, axis=2)
        masks.append(mask)
    if masks:
        masks = np.stack(masks, axis=0)
    else:
        masks = np.zeros((0, height, width), dtype=np.uint8)
    return masks

def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool=False, input_data_format: Optional[Union[ChannelDimension, str]]=None):
    (image_height, image_width) = get_image_size(image, channel_dim=input_data_format)
    image_id = target['image_id']
    image_id = np.asarray([image_id], dtype=np.int64)
    annotations = target['annotations']
    annotations = [obj for obj in annotations if 'iscrowd' not in obj or obj['iscrowd'] == 0]
    classes = [obj['category_id'] for obj in annotations]
    classes = np.asarray(classes, dtype=np.int64)
    area = np.asarray([obj['area'] for obj in annotations], dtype=np.float32)
    iscrowd = np.asarray([obj['iscrowd'] if 'iscrowd' in obj else 0 for obj in annotations], dtype=np.int64)
    boxes = [obj['bbox'] for obj in annotations]
    boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
    boxes[:, 2:] += boxes[:, :2]
    boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
    boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
    keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
    new_target = {}
    new_target['image_id'] = image_id
    new_target['class_labels'] = classes[keep]
    new_target['boxes'] = boxes[keep]
    new_target['area'] = area[keep]
    new_target['iscrowd'] = iscrowd[keep]
    new_target['orig_size'] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
    if annotations and 'keypoints' in annotations[0]:
        keypoints = [obj['keypoints'] for obj in annotations]
        keypoints = np.asarray(keypoints, dtype=np.float32)
        keypoints = keypoints[keep]
        num_keypoints = keypoints.shape[0]
        keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
        new_target['keypoints'] = keypoints
    if return_segmentation_masks:
        segmentation_masks = [obj['segmentation'] for obj in annotations]
        masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
        new_target['masks'] = masks[keep]
    return new_target

def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
    if masks.size == 0:
        return np.zeros((0, 4))
    (h, w) = masks.shape[-2:]
    y = np.arange(0, h, dtype=np.float32)
    x = np.arange(0, w, dtype=np.float32)
    (y, x) = np.meshgrid(y, x, indexing='ij')
    x_mask = masks * np.expand_dims(x, axis=0)
    x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
    x = np.ma.array(x_mask, mask=~np.array(masks, dtype=bool))
    x_min = x.filled(fill_value=100000000.0)
    x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
    y_mask = masks * np.expand_dims(y, axis=0)
    y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
    y = np.ma.array(y_mask, mask=~np.array(masks, dtype=bool))
    y_min = y.filled(fill_value=100000000.0)
    y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
    return np.stack([x_min, y_min, x_max, y_max], 1)
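# Editorial note: masks_to_boxes turns binary masks into (x_min, y_min, x_max, y_max) boxes by
# building pixel-coordinate grids, taking the per-mask maximum for the upper bounds and a
# masked-array minimum (non-mask pixels filled with a large sentinel) for the lower bounds.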

def prepare_coco_panoptic_annotation(image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool=True, input_data_format: Union[ChannelDimension, str]=None) -> Dict:
    (image_height, image_width) = get_image_size(image, channel_dim=input_data_format)
    annotation_path = pathlib.Path(masks_path) / target['file_name']
    new_target = {}
    new_target['image_id'] = np.asarray([target['image_id'] if 'image_id' in target else target['id']], dtype=np.int64)
    new_target['size'] = np.asarray([image_height, image_width], dtype=np.int64)
    new_target['orig_size'] = np.asarray([image_height, image_width], dtype=np.int64)
    if 'segments_info' in target:
        masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
        masks = rgb_to_id(masks)
        ids = np.array([segment_info['id'] for segment_info in target['segments_info']])
        masks = masks == ids[:, None, None]
        masks = masks.astype(np.uint8)
        if return_masks:
            new_target['masks'] = masks
        new_target['boxes'] = masks_to_boxes(masks)
        new_target['class_labels'] = np.array([segment_info['category_id'] for segment_info in target['segments_info']], dtype=np.int64)
        new_target['iscrowd'] = np.asarray([segment_info['iscrowd'] for segment_info in target['segments_info']], dtype=np.int64)
        new_target['area'] = np.asarray([segment_info['area'] for segment_info in target['segments_info']], dtype=np.float32)
    return new_target

def get_segmentation_image(masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False):
    (h, w) = input_size
    (final_h, final_w) = target_size
    m_id = scipy.special.softmax(masks.transpose(0, 1), -1)
    if m_id.shape[-1] == 0:
        m_id = np.zeros((h, w), dtype=np.int64)
    else:
        m_id = m_id.argmax(-1).reshape(h, w)
    if deduplicate:
        for equiv in stuff_equiv_classes.values():
            for eq_id in equiv:
                m_id[m_id == eq_id] = equiv[0]
    seg_img = id_to_rgb(m_id)
    seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
    return seg_img

def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray:
    (final_h, final_w) = target_size
    np_seg_img = seg_img.astype(np.uint8)
    np_seg_img = np_seg_img.reshape(final_h, final_w, 3)
    m_id = rgb_to_id(np_seg_img)
    area = [(m_id == i).sum() for i in range(n_classes)]
    return area

def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    probs = scipy.special.softmax(logits, axis=-1)
    labels = probs.argmax(-1, keepdims=True)
    scores = np.take_along_axis(probs, labels, axis=-1)
    (scores, labels) = (scores.squeeze(-1), labels.squeeze(-1))
    return (scores, labels)

def post_process_panoptic_sample(out_logits: np.ndarray, masks: np.ndarray, boxes: np.ndarray, processed_size: Tuple[int, int], target_size: Tuple[int, int], is_thing_map: Dict, threshold=0.85) -> Dict:
    (scores, labels) = score_labels_from_class_probabilities(out_logits)
    keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)
    cur_scores = scores[keep]
    cur_classes = labels[keep]
    cur_boxes = center_to_corners_format(boxes[keep])
    if len(cur_boxes) != len(cur_classes):
        raise ValueError('Not as many boxes as there are classes')
    cur_masks = masks[keep]
    cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
    cur_masks = safe_squeeze(cur_masks, 1)
    (b, h, w) = cur_masks.shape
    cur_masks = cur_masks.reshape(b, -1)
    stuff_equiv_classes = defaultdict(list)
    for (k, label) in enumerate(cur_classes):
        if not is_thing_map[label]:
            stuff_equiv_classes[label].append(k)
    seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
    area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))
    if cur_classes.size > 0:
        filtered_small = np.array([a <= 4 for a in area], dtype=bool)
        while filtered_small.any():
            cur_masks = cur_masks[~filtered_small]
            cur_scores = cur_scores[~filtered_small]
            cur_classes = cur_classes[~filtered_small]
            seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
            area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
            filtered_small = np.array([a <= 4 for a in area], dtype=bool)
    else:
        cur_classes = np.ones((1, 1), dtype=np.int64)
    segments_info = [{'id': i, 'isthing': is_thing_map[cat], 'category_id': int(cat), 'area': a} for (i, (cat, a)) in enumerate(zip(cur_classes, area))]
    del cur_classes
    with io.BytesIO() as out:
        PIL.Image.fromarray(seg_img).save(out, format='PNG')
        predictions = {'png_string': out.getvalue(), 'segments_info': segments_info}
    return predictions

def resize_annotation(annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, resample: PILImageResampling=PILImageResampling.NEAREST):
    ratios = tuple((float(s) / float(s_orig) for (s, s_orig) in zip(target_size, orig_size)))
    (ratio_height, ratio_width) = ratios
    new_annotation = {}
    new_annotation['size'] = target_size
    for (key, value) in annotation.items():
        if key == 'boxes':
            boxes = value
            scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
            new_annotation['boxes'] = scaled_boxes
        elif key == 'area':
            area = value
            scaled_area = area * (ratio_width * ratio_height)
            new_annotation['area'] = scaled_area
        elif key == 'masks':
            masks = value[:, None]
            masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
            masks = masks.astype(np.float32)
            masks = masks[:, 0] > threshold
            new_annotation['masks'] = masks
        elif key == 'size':
            new_annotation['size'] = target_size
        else:
            new_annotation[key] = value
    return new_annotation

def binary_mask_to_rle(mask):
    if is_torch_tensor(mask):
        mask = mask.numpy()
    pixels = mask.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return list(runs)
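# Worked example (editorial): for mask = [0, 1, 1, 0, 1], padding with zeros and diffing gives change
# points [2, 4, 5, 6] (after the +1 offset); `runs[1::2] -= runs[::2]` then turns this into
# [2, 2, 5, 1]: a run of 2 foreground pixels starting at (1-based) position 2 and a run of 1 starting
# at position 5.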

def convert_segmentation_to_rle(segmentation):
    segment_ids = torch.unique(segmentation)
    run_length_encodings = []
    for idx in segment_ids:
        mask = torch.where(segmentation == idx, 1, 0)
        rle = binary_mask_to_rle(mask)
        run_length_encodings.append(rle)
    return run_length_encodings

def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
    if not masks.shape[0] == scores.shape[0] == labels.shape[0]:
        raise ValueError('mask, scores and labels must have the same shape!')
    to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)
    return (masks[to_keep], scores[to_keep], labels[to_keep])

def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
    mask_k = mask_labels == k
    mask_k_area = mask_k.sum()
    original_area = (mask_probs[k] >= mask_threshold).sum()
    mask_exists = mask_k_area > 0 and original_area > 0
    if mask_exists:
        area_ratio = mask_k_area / original_area
        if not area_ratio.item() > overlap_mask_area_threshold:
            mask_exists = False
    return (mask_exists, mask_k)

def compute_segments(mask_probs, pred_scores, pred_labels, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[Set[int]]=None, target_size: Tuple[int, int]=None):
    height = mask_probs.shape[1] if target_size is None else target_size[0]
    width = mask_probs.shape[2] if target_size is None else target_size[1]
    segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
    segments: List[Dict] = []
    if target_size is not None:
        mask_probs = nn.functional.interpolate(mask_probs.unsqueeze(0), size=target_size, mode='bilinear', align_corners=False)[0]
    current_segment_id = 0
    mask_probs *= pred_scores.view(-1, 1, 1)
    mask_labels = mask_probs.argmax(0)
    stuff_memory_list: Dict[str, int] = {}
    for k in range(pred_labels.shape[0]):
        pred_class = pred_labels[k].item()
        should_fuse = pred_class in label_ids_to_fuse
        (mask_exists, mask_k) = check_segment_validity(mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold)
        if mask_exists:
            if pred_class in stuff_memory_list:
                current_segment_id = stuff_memory_list[pred_class]
            else:
                current_segment_id += 1
            segmentation[mask_k] = current_segment_id
            segment_score = round(pred_scores[k].item(), 6)
            segments.append({'id': current_segment_id, 'label_id': pred_class, 'was_fused': should_fuse, 'score': segment_score})
            if should_fuse:
                stuff_memory_list[pred_class] = current_segment_id
    return (segmentation, segments)
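# Editorial note: compute_segments assigns each pixel to the highest-scoring query (mask_probs
# weighted by pred_scores, then argmax over queries), keeps a segment only when its surviving area
# exceeds overlap_mask_area_threshold of the thresholded original mask, and reuses a single segment id
# per class for classes listed in label_ids_to_fuse.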

class ConditionalDetrImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values', 'pixel_mask']

    def __init__(self, format: Union[str, AnnotationFormat]=AnnotationFormat.COCO_DETECTION, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Union[float, List[float]]=None, image_std: Union[float, List[float]]=None, do_convert_annotations: Optional[bool]=None, do_pad: bool=True, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> None:
        if 'pad_and_return_pixel_mask' in kwargs:
            do_pad = kwargs.pop('pad_and_return_pixel_mask')
        if 'max_size' in kwargs:
            logger.warning_once("The `max_size` parameter is deprecated and will be removed in v4.26. Please specify in `size['longest_edge'] instead`.")
            max_size = kwargs.pop('max_size')
        else:
            max_size = None if size is None else 1333
        size = size if size is not None else {'shortest_edge': 800, 'longest_edge': 1333}
        size = get_size_dict(size, max_size=max_size, default_to_square=False)
        if do_convert_annotations is None:
            do_convert_annotations = do_normalize
        super().__init__(**kwargs)
        self.format = format
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_convert_annotations = do_convert_annotations
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        self.do_pad = do_pad
        self.pad_size = pad_size
        self._valid_processor_keys = ['images', 'annotations', 'return_segmentation_masks', 'masks_path', 'do_resize', 'size', 'resample', 'do_rescale', 'rescale_factor', 'do_normalize', 'do_convert_annotations', 'image_mean', 'image_std', 'do_pad', 'pad_size', 'format', 'return_tensors', 'data_format', 'input_data_format']

    @classmethod
    def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
        image_processor_dict = image_processor_dict.copy()
        if 'max_size' in kwargs:
            image_processor_dict['max_size'] = kwargs.pop('max_size')
        if 'pad_and_return_pixel_mask' in kwargs:
            image_processor_dict['pad_and_return_pixel_mask'] = kwargs.pop('pad_and_return_pixel_mask')
        return super().from_dict(image_processor_dict, **kwargs)

    def prepare_annotation(self, image: np.ndarray, target: Dict, format: Optional[AnnotationFormat]=None, return_segmentation_masks: bool=None, masks_path: Optional[Union[str, pathlib.Path]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Dict:
        format = format if format is not None else self.format
        if format == AnnotationFormat.COCO_DETECTION:
            return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
            target = prepare_coco_detection_annotation(image, target, return_segmentation_masks, input_data_format=input_data_format)
        elif format == AnnotationFormat.COCO_PANOPTIC:
            return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
            target = prepare_coco_panoptic_annotation(image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format)
        else:
            raise ValueError(f'Format {format} is not supported.')
        return target

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        if 'max_size' in kwargs:
            logger.warning_once("The `max_size` parameter is deprecated and will be removed in v4.26. Please specify in `size['longest_edge'] instead`.")
            max_size = kwargs.pop('max_size')
        else:
            max_size = None
        size = get_size_dict(size, max_size=max_size, default_to_square=False)
        if 'shortest_edge' in size and 'longest_edge' in size:
            new_size = get_resize_output_image_size(image, size['shortest_edge'], size['longest_edge'], input_data_format=input_data_format)
        elif 'max_height' in size and 'max_width' in size:
            new_size = get_image_size_for_max_height_width(image, size['max_height'], size['max_width'], input_data_format=input_data_format)
        elif 'height' in size and 'width' in size:
            new_size = (size['height'], size['width'])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.")
        image = resize(image, size=new_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
        return image

    def resize_annotation(self, annotation, orig_size, size, resample: PILImageResampling=PILImageResampling.NEAREST) -> Dict:
        return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)

    def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
        return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)

    def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
        return normalize_annotation(annotation, image_size=image_size)

    def _update_annotation_for_padded_image(self, annotation: Dict, input_image_size: Tuple[int, int], output_image_size: Tuple[int, int], padding, update_bboxes) -> Dict:
        new_annotation = {}
        new_annotation['size'] = output_image_size
        for (key, value) in annotation.items():
            if key == 'masks':
                masks = value
                masks = pad(masks, padding, mode=PaddingMode.CONSTANT, constant_values=0, input_data_format=ChannelDimension.FIRST)
                masks = safe_squeeze(masks, 1)
                new_annotation['masks'] = masks
            elif key == 'boxes' and update_bboxes:
                boxes = value
                boxes *= np.asarray([input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0], input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0]])
                new_annotation['boxes'] = boxes
            elif key == 'size':
                new_annotation['size'] = output_image_size
            else:
                new_annotation[key] = value
        return new_annotation

    def _pad_image(self, image: np.ndarray, output_size: Tuple[int, int], annotation: Optional[Dict[str, Any]]=None, constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True) -> np.ndarray:
        (input_height, input_width) = get_image_size(image, channel_dim=input_data_format)
        (output_height, output_width) = output_size
        pad_bottom = output_height - input_height
        pad_right = output_width - input_width
        padding = ((0, pad_bottom), (0, pad_right))
        padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
        if annotation is not None:
            annotation = self._update_annotation_for_padded_image(annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes)
        return (padded_image, annotation)

    def pad(self, images: List[np.ndarray], annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True, pad_size: Optional[Dict[str, int]]=None) -> BatchFeature:
        pad_size = pad_size if pad_size is not None else self.pad_size
        if pad_size is not None:
            padded_size = (pad_size['height'], pad_size['width'])
        else:
            padded_size = get_max_height_width(images, input_data_format=input_data_format)
        annotation_list = annotations if annotations is not None else [None] * len(images)
        padded_images = []
        padded_annotations = []
        for (image, annotation) in zip(images, annotation_list):
            (padded_image, padded_annotation) = self._pad_image(image, padded_size, annotation, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, update_bboxes=update_bboxes)
            padded_images.append(padded_image)
            padded_annotations.append(padded_annotation)
        data = {'pixel_values': padded_images}
        if return_pixel_mask:
            masks = [make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images]
            data['pixel_mask'] = masks
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        if annotations is not None:
            encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations]
        return encoded_inputs

    def preprocess(self, images: ImageInput, annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, return_segmentation_masks: bool=None, masks_path: Optional[Union[str, pathlib.Path]]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, do_convert_annotations: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_pad: Optional[bool]=None, format: Optional[Union[str, AnnotationFormat]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> BatchFeature:
        if 'pad_and_return_pixel_mask' in kwargs:
            logger.warning_once('The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, use `do_pad` instead.')
            do_pad = kwargs.pop('pad_and_return_pixel_mask')
        max_size = None
        if 'max_size' in kwargs:
            logger.warning_once("The `max_size` argument is deprecated and will be removed in a future version, use `size['longest_edge']` instead.")
            max_size = kwargs.pop('max_size')
        do_resize = self.do_resize if do_resize is None else do_resize
        size = self.size if size is None else size
        size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
        resample = self.resample if resample is None else resample
        do_rescale = self.do_rescale if do_rescale is None else do_rescale
        rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
        do_normalize = self.do_normalize if do_normalize is None else do_normalize
        image_mean = self.image_mean if image_mean is None else image_mean
        image_std = self.image_std if image_std is None else image_std
        do_convert_annotations = self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
        do_pad = self.do_pad if do_pad is None else do_pad
        pad_size = self.pad_size if pad_size is None else pad_size
        format = self.format if format is None else format
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
        validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
        if annotations is not None and isinstance(annotations, dict):
            annotations = [annotations]
        if annotations is not None and len(images) != len(annotations):
            raise ValueError(f'The number of images ({len(images)}) and annotations ({len(annotations)}) do not match.')
        format = AnnotationFormat(format)
        if annotations is not None:
            validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
        if masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and (not isinstance(masks_path, (pathlib.Path, str))):
            raise ValueError(f'The path to the directory containing the mask PNG files should be provided as a `pathlib.Path` or string object, but is {type(masks_path)} instead.')
        images = [to_numpy_array(image) for image in images]
        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(images[0])
        if annotations is not None:
            prepared_images = []
            prepared_annotations = []
            for (image, target) in zip(images, annotations):
                target = self.prepare_annotation(image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=input_data_format)
                prepared_images.append(image)
                prepared_annotations.append(target)
            images = prepared_images
            annotations = prepared_annotations
            del prepared_images, prepared_annotations
        if do_resize:
            if annotations is not None:
                (resized_images, resized_annotations) = ([], [])
                for (image, target) in zip(images, annotations):
                    orig_size = get_image_size(image, input_data_format)
                    resized_image = self.resize(image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format)
                    resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image, input_data_format))
                    resized_images.append(resized_image)
                    resized_annotations.append(resized_annotation)
                images = resized_images
                annotations = resized_annotations
                del resized_images, resized_annotations
            else:
                images = [self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images]
        if do_convert_annotations and annotations is not None:
            annotations = [self.normalize_annotation(annotation, get_image_size(image, input_data_format)) for (annotation, image) in zip(annotations, images)]
        if do_pad:
            encoded_inputs = self.pad(images, annotations=annotations, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format, update_bboxes=do_convert_annotations, return_tensors=return_tensors, pad_size=pad_size)
        else:
            images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
            encoded_inputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
            if annotations is not None:
                encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations]
        return encoded_inputs

    def post_process(self, outputs, target_sizes):
        logger.warning_once('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.')
        (out_logits, out_bbox) = (outputs.logits, outputs.pred_boxes)
        if len(out_logits) != len(target_sizes):
            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
        if target_sizes.shape[1] != 2:
            raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')
        prob = out_logits.sigmoid()
        (topk_values, topk_indexes) = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1)
        scores = topk_values
        topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')
        labels = topk_indexes % out_logits.shape[2]
        boxes = center_to_corners_format(out_bbox)
        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
        (img_h, img_w) = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]
        results = [{'scores': s, 'labels': l, 'boxes': b} for (s, l, b) in zip(scores, labels, boxes)]
        return results

    def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, top_k: int=100):
        (out_logits, out_bbox) = (outputs.logits, outputs.pred_boxes)
        if target_sizes is not None:
            if len(out_logits) != len(target_sizes):
                raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
        prob = out_logits.sigmoid()
        prob = prob.view(out_logits.shape[0], -1)
        k_value = min(top_k, prob.size(1))
        (topk_values, topk_indexes) = torch.topk(prob, k_value, dim=1)
        scores = topk_values
        topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')
        labels = topk_indexes % out_logits.shape[2]
        boxes = center_to_corners_format(out_bbox)
        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
        if target_sizes is not None:
            if isinstance(target_sizes, List):
                img_h = torch.Tensor([i[0] for i in target_sizes])
                img_w = torch.Tensor([i[1] for i in target_sizes])
            else:
                (img_h, img_w) = target_sizes.unbind(1)
            scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
            boxes = boxes * scale_fct[:, None, :]
        results = []
        for (s, l, b) in zip(scores, labels, boxes):
            score = s[s > threshold]
            label = l[s > threshold]
            box = b[s > threshold]
            results.append({'scores': score, 'labels': label, 'boxes': box})
        return results

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]]=None):
        class_queries_logits = outputs.logits
        masks_queries_logits = outputs.pred_masks
        masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
        masks_probs = masks_queries_logits.sigmoid()
        segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)
        batch_size = class_queries_logits.shape[0]
        if target_sizes is not None:
            if batch_size != len(target_sizes):
                raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
            semantic_segmentation = []
            for idx in range(batch_size):
                resized_logits = nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = segmentation.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation

    def post_process_instance_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, target_sizes: Optional[List[Tuple[int, int]]]=None, return_coco_annotation: Optional[bool]=False) -> List[Dict]:
        class_queries_logits = outputs.logits
        masks_queries_logits = outputs.pred_masks
        batch_size = class_queries_logits.shape[0]
        num_labels = class_queries_logits.shape[-1] - 1
        mask_probs = masks_queries_logits.sigmoid()
        (pred_scores, pred_labels) = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
        results: List[Dict[str, TensorType]] = []
        for i in range(batch_size):
            (mask_probs_item, pred_scores_item, pred_labels_item) = remove_low_and_no_objects(mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels)
            if mask_probs_item.shape[0] <= 0:
                (height, width) = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
                segmentation = torch.zeros((height, width)) - 1
                results.append({'segmentation': segmentation, 'segments_info': []})
                continue
            target_size = target_sizes[i] if target_sizes is not None else None
            (segmentation, segments) = compute_segments(mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=[], target_size=target_size)
            if return_coco_annotation:
                segmentation = convert_segmentation_to_rle(segmentation)
            results.append({'segmentation': segmentation, 'segments_info': segments})
        return results

    def post_process_panoptic_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[Set[int]]=None, target_sizes: Optional[List[Tuple[int, int]]]=None) -> List[Dict]:
        if label_ids_to_fuse is None:
            logger.warning_once('`label_ids_to_fuse` unset. No instance will be fused.')
            label_ids_to_fuse = set()
        class_queries_logits = outputs.logits
        masks_queries_logits = outputs.pred_masks
        batch_size = class_queries_logits.shape[0]
        num_labels = class_queries_logits.shape[-1] - 1
        mask_probs = masks_queries_logits.sigmoid()
        (pred_scores, pred_labels) = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
        results: List[Dict[str, TensorType]] = []
        for i in range(batch_size):
            (mask_probs_item, pred_scores_item, pred_labels_item) = remove_low_and_no_objects(mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels)
            if mask_probs_item.shape[0] <= 0:
                (height, width) = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
                segmentation = torch.zeros((height, width)) - 1
                results.append({'segmentation': segmentation, 'segments_info': []})
                continue
            target_size = target_sizes[i] if target_sizes is not None else None
            (segmentation, segments) = compute_segments(mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size)
            results.append({'segmentation': segmentation, 'segments_info': segments})
        return results
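
# Minimal usage sketch for the processor methods defined above, assuming a PIL image and the
# microsoft/conditional-detr-resnet-50 checkpoint; the helper name and the 0.5 threshold are
# illustrative choices, not part of the library API.
def _example_detection_postprocessing(image):
    import torch
    from transformers import ConditionalDetrForObjectDetection, ConditionalDetrImageProcessor

    processor = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50')
    model = ConditionalDetrForObjectDetection.from_pretrained('microsoft/conditional-detr-resnet-50')
    # Calling the processor runs `preprocess`: resize, rescale, normalize and pad, returning pixel_values and pixel_mask.
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    # target_sizes holds the original (height, width) of each image so boxes are mapped back to pixel coordinates.
    target_sizes = torch.tensor([image.size[::-1]])
    return processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)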

# File: transformers-main/src/transformers/models/conditional_detr/modeling_conditional_detr.py
""""""
import math
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch import Tensor, nn
from ...activations import ACT2FN
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_accelerate_available, is_scipy_available, is_timm_available, is_vision_available, logging, replace_return_docstrings, requires_backends
from ...utils.backbone_utils import load_backbone
from .configuration_conditional_detr import ConditionalDetrConfig
if is_accelerate_available():
    from accelerate import PartialState
    from accelerate.utils import reduce
if is_scipy_available():
    from scipy.optimize import linear_sum_assignment
if is_timm_available():
    from timm import create_model
if is_vision_available():
    from ...image_transforms import center_to_corners_format
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'ConditionalDetrConfig'
_CHECKPOINT_FOR_DOC = 'microsoft/conditional-detr-resnet-50'

@dataclass
class ConditionalDetrDecoderOutput(BaseModelOutputWithCrossAttentions):
    intermediate_hidden_states: Optional[torch.FloatTensor] = None
    reference_points: Optional[Tuple[torch.FloatTensor]] = None

@dataclass
class ConditionalDetrModelOutput(Seq2SeqModelOutput):
    intermediate_hidden_states: Optional[torch.FloatTensor] = None
    reference_points: Optional[Tuple[torch.FloatTensor]] = None

@dataclass
class ConditionalDetrObjectDetectionOutput(ModelOutput):
    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[Dict] = None
    logits: torch.FloatTensor = None
    pred_boxes: torch.FloatTensor = None
    auxiliary_outputs: Optional[List[Dict]] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None

@dataclass
class ConditionalDetrSegmentationOutput(ModelOutput):
    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[Dict] = None
    logits: torch.FloatTensor = None
    pred_boxes: torch.FloatTensor = None
    pred_masks: torch.FloatTensor = None
    auxiliary_outputs: Optional[List[Dict]] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None

class ConditionalDetrFrozenBatchNorm2d(nn.Module):

    def __init__(self, n):
        super().__init__()
        self.register_buffer('weight', torch.ones(n))
        self.register_buffer('bias', torch.zeros(n))
        self.register_buffer('running_mean', torch.zeros(n))
        self.register_buffer('running_var', torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # The statistics and affine parameters are frozen buffers, so batch norm reduces to a
        # fixed per-channel scale and bias; reshape everything once up front.
        weight = self.weight.reshape(1, -1, 1, 1)
        bias = self.bias.reshape(1, -1, 1, 1)
        running_var = self.running_var.reshape(1, -1, 1, 1)
        running_mean = self.running_mean.reshape(1, -1, 1, 1)
        epsilon = 1e-05
        scale = weight * (running_var + epsilon).rsqrt()
        bias = bias - running_mean * scale
        return x * scale + bias

def replace_batch_norm(model):
    for (name, module) in model.named_children():
        if isinstance(module, nn.BatchNorm2d):
            new_module = ConditionalDetrFrozenBatchNorm2d(module.num_features)
            if module.weight.device != torch.device('meta'):
                new_module.weight.data.copy_(module.weight)
                new_module.bias.data.copy_(module.bias)
                new_module.running_mean.data.copy_(module.running_mean)
                new_module.running_var.data.copy_(module.running_var)
            model._modules[name] = new_module
        if len(list(module.children())) > 0:
            replace_batch_norm(module)
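
# Quick illustrative check for `replace_batch_norm` (the toy CNN below is an assumption):
# every `nn.BatchNorm2d` in the module tree is swapped in place for the frozen variant.
def _example_replace_batch_norm():
    cnn = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.BatchNorm2d(8), nn.ReLU())
    replace_batch_norm(cnn)
    assert isinstance(cnn[1], ConditionalDetrFrozenBatchNorm2d)
    return cnn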

class ConditionalDetrConvEncoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        if config.use_timm_backbone:
            requires_backends(self, ['timm'])
            kwargs = getattr(config, 'backbone_kwargs', {})
            kwargs = {} if kwargs is None else kwargs.copy()
            out_indices = kwargs.pop('out_indices', (1, 2, 3, 4))
            num_channels = kwargs.pop('in_chans', config.num_channels)
            if config.dilation:
                kwargs['output_stride'] = kwargs.get('output_stride', 16)
            backbone = create_model(config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=out_indices, in_chans=num_channels, **kwargs)
        else:
            backbone = load_backbone(config)
        with torch.no_grad():
            replace_batch_norm(backbone)
        self.model = backbone
        self.intermediate_channel_sizes = self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels
        backbone_model_type = None
        if config.backbone is not None:
            backbone_model_type = config.backbone
        elif config.backbone_config is not None:
            backbone_model_type = config.backbone_config.model_type
        else:
            raise ValueError('Either `backbone` or `backbone_config` should be provided in the config')
        if 'resnet' in backbone_model_type:
            for (name, parameter) in self.model.named_parameters():
                if config.use_timm_backbone:
                    if 'layer2' not in name and 'layer3' not in name and ('layer4' not in name):
                        parameter.requires_grad_(False)
                elif 'stage.1' not in name and 'stage.2' not in name and ('stage.3' not in name):
                    parameter.requires_grad_(False)

    def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
        features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps
        out = []
        for feature_map in features:
            mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
            out.append((feature_map, mask))
        return out

class ConditionalDetrConvModel(nn.Module):

    def __init__(self, conv_encoder, position_embedding):
        super().__init__()
        self.conv_encoder = conv_encoder
        self.position_embedding = position_embedding

    def forward(self, pixel_values, pixel_mask):
        out = self.conv_encoder(pixel_values, pixel_mask)
        pos = []
        for (feature_map, mask) in out:
            pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype))
        return (out, pos)

class ConditionalDetrSinePositionEmbedding(nn.Module):

    def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError('normalize should be True if scale is passed')
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, pixel_values, pixel_mask):
        if pixel_mask is None:
            raise ValueError('No pixel mask provided')
        y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
        x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            y_embed = y_embed / (y_embed[:, -1:, :] + 1e-06) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + 1e-06) * self.scale
        dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float()
        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.embedding_dim)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos

class ConditionalDetrLearnedPositionEmbedding(nn.Module):

    def __init__(self, embedding_dim=256):
        super().__init__()
        self.row_embeddings = nn.Embedding(50, embedding_dim)
        self.column_embeddings = nn.Embedding(50, embedding_dim)

    def forward(self, pixel_values, pixel_mask=None):
        (height, width) = pixel_values.shape[-2:]
        width_values = torch.arange(width, device=pixel_values.device)
        height_values = torch.arange(height, device=pixel_values.device)
        x_emb = self.column_embeddings(width_values)
        y_emb = self.row_embeddings(height_values)
        pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
        pos = pos.permute(2, 0, 1)
        pos = pos.unsqueeze(0)
        pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
        return pos

def build_position_encoding(config):
    n_steps = config.d_model // 2
    if config.position_embedding_type == 'sine':
        position_embedding = ConditionalDetrSinePositionEmbedding(n_steps, normalize=True)
    elif config.position_embedding_type == 'learned':
        position_embedding = ConditionalDetrLearnedPositionEmbedding(n_steps)
    else:
        raise ValueError(f'Position embedding type {config.position_embedding_type} is not supported')
    return position_embedding

def gen_sine_position_embeddings(pos_tensor, d_model):
    scale = 2 * math.pi
    dim = d_model // 2
    dim_t = torch.arange(dim, dtype=torch.float32, device=pos_tensor.device)
    dim_t = 10000 ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / dim)
    x_embed = pos_tensor[:, :, 0] * scale
    y_embed = pos_tensor[:, :, 1] * scale
    pos_x = x_embed[:, :, None] / dim_t
    pos_y = y_embed[:, :, None] / dim_t
    pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
    pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
    pos = torch.cat((pos_y, pos_x), dim=2)
    return pos
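
# Shape sketch for `gen_sine_position_embeddings` (the sizes below are assumptions): a
# (batch, num_queries, 2) tensor of normalized reference points becomes a
# (batch, num_queries, d_model) sine/cosine embedding.
def _example_sine_position_embeddings():
    reference_points = torch.rand(2, 300, 2)
    embeddings = gen_sine_position_embeddings(reference_points, d_model=256)
    assert embeddings.shape == (2, 300, 256)
    return embeddings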

def inverse_sigmoid(x, eps=1e-05):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)
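
# Sanity-check sketch for `inverse_sigmoid`: it is the (clamped) inverse of `torch.sigmoid`,
# so round-tripping recovers the input away from the clamping boundaries.
def _example_inverse_sigmoid_roundtrip():
    x = torch.tensor([-3.0, 0.0, 2.5])
    assert torch.allclose(inverse_sigmoid(torch.sigmoid(x)), x, atol=1e-4)
    return x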

class DetrAttention(nn.Module):

    def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, bias: bool=True):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
        self.scaling = self.head_dim ** (-0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
        return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor]):
        return tensor if object_queries is None else tensor + object_queries

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, spatial_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        is_cross_attention = key_value_states is not None
        (batch_size, target_len, embed_dim) = hidden_states.size()
        if object_queries is not None:
            hidden_states_original = hidden_states
            hidden_states = self.with_pos_embed(hidden_states, object_queries)
        if spatial_position_embeddings is not None:
            key_value_states_original = key_value_states
            key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings)
        query_states = self.q_proj(hidden_states) * self.scaling
        if is_cross_attention:
            key_states = self._shape(self.k_proj(key_value_states), -1, batch_size)
            value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size)
        else:
            key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
            value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
        proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        source_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
            raise ValueError(f'Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is {attn_weights.size()}')
        if attention_mask is not None:
            if attention_mask.size() != (batch_size, 1, target_len, source_len):
                raise ValueError(f'Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is {attention_mask.size()}')
            attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
            attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if output_attentions:
            attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
            attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
            raise ValueError(f'`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is {attn_output.size()}')
        attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
        attn_output = self.out_proj(attn_output)
        return (attn_output, attn_weights_reshaped)
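
# Smoke-test sketch for `DetrAttention` above (the 256-dim / 8-head sizes are assumptions,
# not taken from a config): self-attention with additive positional embeddings on the
# queries and keys only.
def _example_detr_attention():
    attention = DetrAttention(embed_dim=256, num_heads=8)
    hidden_states = torch.randn(1, 10, 256)
    object_queries = torch.randn(1, 10, 256)
    attn_output, attn_weights = attention(hidden_states, object_queries=object_queries, output_attentions=True)
    assert attn_output.shape == (1, 10, 256)
    assert attn_weights.shape == (1, 8, 10, 10)
    return attn_output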

class ConditionalDetrAttention(nn.Module):

    def __init__(self, embed_dim: int, out_dim: int, num_heads: int, dropout: float=0.0, bias: bool=True):
        super().__init__()
        self.embed_dim = embed_dim
        self.out_dim = out_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
        self.v_head_dim = out_dim // num_heads
        if self.v_head_dim * num_heads != self.out_dim:
            raise ValueError(f'out_dim must be divisible by num_heads (got `out_dim`: {self.out_dim} and `num_heads`: {num_heads}).')
        self.scaling = self.head_dim ** (-0.5)
        self.out_proj = nn.Linear(out_dim, out_dim, bias=bias)

    def _qk_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
        return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def _v_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
        return tensor.view(batch_size, seq_len, self.num_heads, self.v_head_dim).transpose(1, 2).contiguous()

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, key_states: Optional[torch.Tensor]=None, value_states: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        (batch_size, target_len, _) = hidden_states.size()
        query_states = hidden_states * self.scaling
        key_states = self._qk_shape(key_states, -1, batch_size)
        value_states = self._v_shape(value_states, -1, batch_size)
        proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
        v_proj_shape = (batch_size * self.num_heads, -1, self.v_head_dim)
        query_states = self._qk_shape(query_states, target_len, batch_size).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*v_proj_shape)
        source_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
            raise ValueError(f'Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is {attn_weights.size()}')
        if attention_mask is not None:
            if attention_mask.size() != (batch_size, 1, target_len, source_len):
                raise ValueError(f'Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is {attention_mask.size()}')
            attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
            attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if output_attentions:
            attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
            attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (batch_size * self.num_heads, target_len, self.v_head_dim):
            raise ValueError(f'`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.v_head_dim)}, but is {attn_output.size()}')
        attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.v_head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(batch_size, target_len, self.out_dim)
        attn_output = self.out_proj(attn_output)
        return (attn_output, attn_weights_reshaped)

class ConditionalDetrEncoderLayer(nn.Module):

    def __init__(self, config: ConditionalDetrConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = DetrAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout)
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: torch.Tensor=None, output_attentions: bool=False):
        residual = hidden_states
        (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        if self.training:
            if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
                clamp_value = torch.finfo(hidden_states.dtype).max - 1000
                hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs

class ConditionalDetrDecoderLayer(nn.Module):

    def __init__(self, config: ConditionalDetrConfig):
        super().__init__()
        self.embed_dim = config.d_model
        d_model = config.d_model
        self.sa_qcontent_proj = nn.Linear(d_model, d_model)
        self.sa_qpos_proj = nn.Linear(d_model, d_model)
        self.sa_kcontent_proj = nn.Linear(d_model, d_model)
        self.sa_kpos_proj = nn.Linear(d_model, d_model)
        self.sa_v_proj = nn.Linear(d_model, d_model)
        self.self_attn = ConditionalDetrAttention(embed_dim=self.embed_dim, out_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.ca_qcontent_proj = nn.Linear(d_model, d_model)
        self.ca_qpos_proj = nn.Linear(d_model, d_model)
        self.ca_kcontent_proj = nn.Linear(d_model, d_model)
        self.ca_kpos_proj = nn.Linear(d_model, d_model)
        self.ca_v_proj = nn.Linear(d_model, d_model)
        self.ca_qpos_sine_proj = nn.Linear(d_model, d_model)
        self.encoder_attn = ConditionalDetrAttention(self.embed_dim * 2, self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout)
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)
        self.nhead = config.decoder_attention_heads

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, query_sine_embed: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, is_first: Optional[bool]=False):
        residual = hidden_states
        # Self-attention: the content and positional parts of the queries/keys are projected separately and summed.
        q_content = self.sa_qcontent_proj(hidden_states)
        q_pos = self.sa_qpos_proj(query_position_embeddings)
        k_content = self.sa_kcontent_proj(hidden_states)
        k_pos = self.sa_kpos_proj(query_position_embeddings)
        v = self.sa_v_proj(hidden_states)
        (_, num_queries, n_model) = q_content.shape
        q = q_content + q_pos
        k = k_content + k_pos
        (hidden_states, self_attn_weights) = self.self_attn(hidden_states=q, attention_mask=attention_mask, key_states=k, value_states=v, output_attentions=output_attentions)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Conditional cross-attention: content queries/keys come from the decoder and encoder states;
        # the positional parts are concatenated per head below so that content and spatial
        # similarities are computed separately.
        q_content = self.ca_qcontent_proj(hidden_states)
        k_content = self.ca_kcontent_proj(encoder_hidden_states)
        v = self.ca_v_proj(encoder_hidden_states)
        (batch_size, num_queries, n_model) = q_content.shape
        (_, source_len, _) = k_content.shape
        k_pos = self.ca_kpos_proj(object_queries)
        if is_first:
            # In the first decoder layer, the positional embeddings are also added to the content query and key.
            q_pos = self.ca_qpos_proj(query_position_embeddings)
            q = q_content + q_pos
            k = k_content + k_pos
        else:
            q = q_content
            k = k_content
        # Split into heads and concatenate the content part with the sine positional part
        # (query side) / the encoder positional embeddings (key side).
        q = q.view(batch_size, num_queries, self.nhead, n_model // self.nhead)
        query_sine_embed = self.ca_qpos_sine_proj(query_sine_embed)
        query_sine_embed = query_sine_embed.view(batch_size, num_queries, self.nhead, n_model // self.nhead)
        q = torch.cat([q, query_sine_embed], dim=3).view(batch_size, num_queries, n_model * 2)
        k = k.view(batch_size, source_len, self.nhead, n_model // self.nhead)
        k_pos = k_pos.view(batch_size, source_len, self.nhead, n_model // self.nhead)
        k = torch.cat([k, k_pos], dim=3).view(batch_size, source_len, n_model * 2)
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            (hidden_states, cross_attn_weights) = self.encoder_attn(hidden_states=q, attention_mask=encoder_attention_mask, key_states=k, value_states=v, output_attentions=output_attentions)
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        return outputs

class MLP(nn.Module):

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList((nn.Linear(n, k) for (n, k) in zip([input_dim] + h, h + [output_dim])))

    def forward(self, x):
        for (i, layer) in enumerate(self.layers):
            x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x
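
# Usage sketch for the `MLP` head above (the sizes are assumptions): a 3-layer MLP as used
# for box-coordinate regression, with ReLU between hidden layers.
def _example_mlp_head():
    head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
    boxes = head(torch.randn(2, 300, 256))
    assert boxes.shape == (2, 300, 4)
    return boxes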

class ConditionalDetrPreTrainedModel(PreTrainedModel):
    config_class = ConditionalDetrConfig
    base_model_prefix = 'model'
    main_input_name = 'pixel_values'
    _no_split_modules = ['ConditionalDetrConvEncoder', 'ConditionalDetrEncoderLayer', 'ConditionalDetrDecoderLayer']

    def _init_weights(self, module):
        std = self.config.init_std
        xavier_std = self.config.init_xavier_std
        if isinstance(module, ConditionalDetrMHAttentionMap):
            nn.init.zeros_(module.k_linear.bias)
            nn.init.zeros_(module.q_linear.bias)
            nn.init.xavier_uniform_(module.k_linear.weight, gain=xavier_std)
            nn.init.xavier_uniform_(module.q_linear.weight, gain=xavier_std)
        elif isinstance(module, ConditionalDetrLearnedPositionEmbedding):
            nn.init.uniform_(module.row_embeddings.weight)
            nn.init.uniform_(module.column_embeddings.weight)
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
CONDITIONAL_DETR_START_DOCSTRING = '\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`ConditionalDetrConfig`]):\n            Model configuration class with all the parameters of the model. Initializing with a config file does not\n            load the weights associated with the model, only the configuration. Check out the\n            [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
CONDITIONAL_DETR_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it.\n\n            Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConditionalDetrImageProcessor.__call__`]\n            for details.\n\n        pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):\n            Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n            - 1 for pixels that are real (i.e. **not masked**),\n            - 0 for pixels that are padding (i.e. **masked**).\n\n            [What are attention masks?](../glossary#attention-mask)\n\n        decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):\n            Not used by default. Can be used to mask object queries.\n        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you\n            can choose to directly pass a flattened representation of an image.\n        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):\n            Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an\n            embedded representation.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
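
# Minimal forward-pass sketch matching CONDITIONAL_DETR_INPUTS_DOCSTRING above, assuming the
# microsoft/conditional-detr-resnet-50 checkpoint and a COCO sample image (both illustrative).
def _example_conditional_detr_forward():
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, ConditionalDetrModel

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50')
    model = ConditionalDetrModel.from_pretrained('microsoft/conditional-detr-resnet-50')
    inputs = processor(images=image, return_tensors='pt')
    outputs = model(**inputs)
    # last_hidden_state has shape (batch_size, num_queries, d_model)
    return outputs.last_hidden_state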

class ConditionalDetrEncoder(ConditionalDetrPreTrainedModel):

    def __init__(self, config: ConditionalDetrConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        self.layers = nn.ModuleList([ConditionalDetrEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.post_init()

    def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        hidden_states = inputs_embeds
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        if attention_mask is not None:
            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for (i, encoder_layer) in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            to_drop = False
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    to_drop = True
            if to_drop:
                layer_outputs = (None, None)
            else:
                layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions)
                hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)

class ConditionalDetrDecoder(ConditionalDetrPreTrainedModel):

    def __init__(self, config: ConditionalDetrConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.layers = nn.ModuleList([ConditionalDetrDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layernorm = nn.LayerNorm(config.d_model)
        d_model = config.d_model
        self.gradient_checkpointing = False
        self.query_scale = MLP(d_model, d_model, d_model, 2)
        self.ref_point_head = MLP(d_model, d_model, 2, 2)
        # As in the Conditional DETR reference implementation, only the first decoder layer
        # keeps its `ca_qpos_proj`; it is removed from all subsequent layers.
        for layer_id in range(config.decoder_layers - 1):
            self.layers[layer_id + 1].ca_qpos_proj = None
        self.post_init()

    def forward(self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
            input_shape = inputs_embeds.size()[:-1]
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
        intermediate = () if self.config.auxiliary_loss else None
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
        reference_points_before_sigmoid = self.ref_point_head(query_position_embeddings)
        reference_points = reference_points_before_sigmoid.sigmoid().transpose(0, 1)
        obj_center = reference_points[..., :2].transpose(0, 1)
        query_sine_embed_before_transformation = gen_sine_position_embeddings(obj_center, self.config.d_model)
        for (idx, decoder_layer) in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue
            if idx == 0:
                pos_transformation = 1
            else:
                pos_transformation = self.query_scale(hidden_states)
            query_sine_embed = query_sine_embed_before_transformation * pos_transformation
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, None, object_queries, query_position_embeddings, query_sine_embed, encoder_hidden_states, encoder_attention_mask, None, None)
            else:
                layer_outputs = decoder_layer(hidden_states, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, query_sine_embed=query_sine_embed, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, is_first=idx == 0)
            hidden_states = layer_outputs[0]
            if self.config.auxiliary_loss:
                hidden_states = self.layernorm(hidden_states)
                intermediate += (hidden_states,)
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        hidden_states = self.layernorm(hidden_states)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        if self.config.auxiliary_loss:
            intermediate = torch.stack(intermediate)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate, reference_points] if v is not None))
        return ConditionalDetrDecoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, intermediate_hidden_states=intermediate, reference_points=reference_points)

@add_start_docstrings('\n    The bare Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw\n    hidden-states without any specific head on top.\n    ', CONDITIONAL_DETR_START_DOCSTRING)
class ConditionalDetrModel(ConditionalDetrPreTrainedModel):

    def __init__(self, config: ConditionalDetrConfig):
        super().__init__(config)
        backbone = ConditionalDetrConvEncoder(config)
        object_queries = build_position_encoding(config)
        self.backbone = ConditionalDetrConvModel(backbone, object_queries)
        self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1)
        self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model)
        self.encoder = ConditionalDetrEncoder(config)
        self.decoder = ConditionalDetrDecoder(config)
        self.post_init()

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def freeze_backbone(self):
        for (name, param) in self.backbone.conv_encoder.model.named_parameters():
            param.requires_grad_(False)

    def unfreeze_backbone(self):
        for (name, param) in self.backbone.conv_encoder.model.named_parameters():
            param.requires_grad_(True)

    @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ConditionalDetrModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], ConditionalDetrModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        (batch_size, num_channels, height, width) = pixel_values.shape
        device = pixel_values.device
        if pixel_mask is None:
            pixel_mask = torch.ones((batch_size, height, width), device=device)
        (features, object_queries_list) = self.backbone(pixel_values, pixel_mask)
        (feature_map, mask) = features[-1]
        if mask is None:
            raise ValueError('Backbone does not return downsampled pixel mask')
        projected_feature_map = self.input_projection(feature_map)
        flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)
        object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1)
        flattened_mask = mask.flatten(1)
        if encoder_outputs is None:
            encoder_outputs = self.encoder(inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
            encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
        query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)
        queries = torch.zeros_like(query_position_embeddings)
        decoder_outputs = self.decoder(inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        if not return_dict:
            return decoder_outputs + encoder_outputs
        return ConditionalDetrModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, reference_points=decoder_outputs.reference_points)
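
# Illustrative, hypothetical usage sketch for the bare model above. The checkpoint name
# "microsoft/conditional-detr-resnet-50" and the COCO image URL are assumptions used only
# for demonstration; the helper below is hypothetical and not called anywhere.
def _example_conditional_detr_model():
    import requests
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, ConditionalDetrModel

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
    model = ConditionalDetrModel.from_pretrained("microsoft/conditional-detr-resnet-50")

    # Preprocess into pixel_values (and a pixel_mask), then run backbone, encoder and
    # conditional decoder in a single forward pass.
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # last_hidden_state has shape (batch_size, num_queries, d_model), e.g. (1, 300, 256).
    return outputs.last_hidden_state.shape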

@add_start_docstrings('\n    Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on\n    top, for tasks such as COCO detection.\n    ', CONDITIONAL_DETR_START_DOCSTRING)
class ConditionalDetrForObjectDetection(ConditionalDetrPreTrainedModel):

    def __init__(self, config: ConditionalDetrConfig):
        super().__init__(config)
        self.model = ConditionalDetrModel(config)
        self.class_labels_classifier = nn.Linear(config.d_model, config.num_labels)
        self.bbox_predictor = ConditionalDetrMLPPredictionHead(input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3)
        self.post_init()

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        return [{'logits': a, 'pred_boxes': b} for (a, b) in zip(outputs_class[:-1], outputs_coord[:-1])]

    @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ConditionalDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[List[dict]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], ConditionalDetrObjectDetectionOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.model(pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.class_labels_classifier(sequence_output)
        reference = outputs.reference_points if return_dict else outputs[-1]
        reference_before_sigmoid = inverse_sigmoid(reference).transpose(0, 1)
        outputs_coords = []
        hs = sequence_output
        tmp = self.bbox_predictor(hs)
        tmp[..., :2] += reference_before_sigmoid
        pred_boxes = tmp.sigmoid()
        (loss, loss_dict, auxiliary_outputs) = (None, None, None)
        if labels is not None:
            matcher = ConditionalDetrHungarianMatcher(class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost)
            losses = ['labels', 'boxes', 'cardinality']
            criterion = ConditionalDetrLoss(matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses)
            criterion.to(self.device)
            outputs_loss = {}
            outputs_loss['logits'] = logits
            outputs_loss['pred_boxes'] = pred_boxes
            if self.config.auxiliary_loss:
                intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4]
                outputs_class = self.class_labels_classifier(intermediate)
                for lvl in range(intermediate.shape[0]):
                    tmp = self.bbox_predictor(intermediate[lvl])
                    tmp[..., :2] += reference_before_sigmoid
                    outputs_coord = tmp.sigmoid()
                    outputs_coords.append(outputs_coord)
                outputs_coord = torch.stack(outputs_coords)
                auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord)
                outputs_loss['auxiliary_outputs'] = auxiliary_outputs
            loss_dict = criterion(outputs_loss, labels)
            weight_dict = {'loss_ce': self.config.cls_loss_coefficient, 'loss_bbox': self.config.bbox_loss_coefficient}
            weight_dict['loss_giou'] = self.config.giou_loss_coefficient
            if self.config.auxiliary_loss:
                aux_weight_dict = {}
                for i in range(self.config.decoder_layers - 1):
                    aux_weight_dict.update({k + f'_{i}': v for (k, v) in weight_dict.items()})
                weight_dict.update(aux_weight_dict)
            loss = sum((loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict))
        if not return_dict:
            if auxiliary_outputs is not None:
                output = (logits, pred_boxes) + auxiliary_outputs + outputs
            else:
                output = (logits, pred_boxes) + outputs
            return (loss, loss_dict) + output if loss is not None else output
        return ConditionalDetrObjectDetectionOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
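
# Illustrative, hypothetical inference sketch for the object-detection head above, including
# post-processing back to absolute (x0, y0, x1, y1) boxes. The checkpoint name and image URL
# are assumptions for demonstration only; the helper is hypothetical and not called anywhere.
def _example_conditional_detr_object_detection():
    import requests
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, ConditionalDetrForObjectDetection

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
    model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")

    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert the per-query logits and normalized (cx, cy, w, h) boxes into thresholded
    # detections expressed in pixel coordinates of the original image.
    target_sizes = torch.tensor([image.size[::-1]])  # PIL size is (width, height) -> (height, width)
    results = image_processor.post_process_object_detection(
        outputs, threshold=0.5, target_sizes=target_sizes
    )[0]
    return results["scores"], results["labels"], results["boxes"]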

@add_start_docstrings('\n    Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) with a segmentation head on top,\n    for tasks such as COCO panoptic.\n    ', CONDITIONAL_DETR_START_DOCSTRING)
class ConditionalDetrForSegmentation(ConditionalDetrPreTrainedModel):

    def __init__(self, config: ConditionalDetrConfig):
        super().__init__(config)
        self.conditional_detr = ConditionalDetrForObjectDetection(config)
        (hidden_size, number_of_heads) = (config.d_model, config.encoder_attention_heads)
        intermediate_channel_sizes = self.conditional_detr.model.backbone.conv_encoder.intermediate_channel_sizes
        self.mask_head = ConditionalDetrMaskHeadSmallConv(hidden_size + number_of_heads, intermediate_channel_sizes[::-1][-3:], hidden_size)
        self.bbox_attention = ConditionalDetrMHAttentionMap(hidden_size, hidden_size, number_of_heads, dropout=0.0, std=config.init_xavier_std)
        self.post_init()

    @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ConditionalDetrSegmentationOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[List[dict]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], ConditionalDetrSegmentationOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        (batch_size, num_channels, height, width) = pixel_values.shape
        device = pixel_values.device
        if pixel_mask is None:
            pixel_mask = torch.ones((batch_size, height, width), device=device)
        (features, object_queries_list) = self.conditional_detr.model.backbone(pixel_values, pixel_mask=pixel_mask)
        (feature_map, mask) = features[-1]
        (batch_size, num_channels, height, width) = feature_map.shape
        projected_feature_map = self.conditional_detr.model.input_projection(feature_map)
        flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)
        object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1)
        flattened_mask = mask.flatten(1)
        if encoder_outputs is None:
            encoder_outputs = self.conditional_detr.model.encoder(inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
            encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
        query_position_embeddings = self.conditional_detr.model.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)
        queries = torch.zeros_like(query_position_embeddings)
        decoder_outputs = self.conditional_detr.model.decoder(inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = decoder_outputs[0]
        logits = self.conditional_detr.class_labels_classifier(sequence_output)
        pred_boxes = self.conditional_detr.bbox_predictor(sequence_output).sigmoid()
        memory = encoder_outputs[0].permute(0, 2, 1).view(batch_size, self.config.d_model, height, width)
        mask = flattened_mask.view(batch_size, height, width)
        bbox_mask = self.bbox_attention(sequence_output, memory, mask=~mask)
        seg_masks = self.mask_head(projected_feature_map, bbox_mask, [features[2][0], features[1][0], features[0][0]])
        pred_masks = seg_masks.view(batch_size, self.conditional_detr.config.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])
        (loss, loss_dict, auxiliary_outputs) = (None, None, None)
        if labels is not None:
            matcher = ConditionalDetrHungarianMatcher(class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost)
            losses = ['labels', 'boxes', 'cardinality', 'masks']
            criterion = ConditionalDetrLoss(matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses)
            criterion.to(self.device)
            outputs_loss = {}
            outputs_loss['logits'] = logits
            outputs_loss['pred_boxes'] = pred_boxes
            outputs_loss['pred_masks'] = pred_masks
            if self.config.auxiliary_loss:
                intermediate = decoder_outputs.intermediate_hidden_states if return_dict else decoder_outputs[-1]
                outputs_class = self.conditional_detr.class_labels_classifier(intermediate)
                outputs_coord = self.conditional_detr.bbox_predictor(intermediate).sigmoid()
                auxiliary_outputs = self.conditional_detr._set_aux_loss(outputs_class, outputs_coord)
                outputs_loss['auxiliary_outputs'] = auxiliary_outputs
            loss_dict = criterion(outputs_loss, labels)
            weight_dict = {'loss_ce': 1, 'loss_bbox': self.config.bbox_loss_coefficient}
            weight_dict['loss_giou'] = self.config.giou_loss_coefficient
            weight_dict['loss_mask'] = self.config.mask_loss_coefficient
            weight_dict['loss_dice'] = self.config.dice_loss_coefficient
            if self.config.auxiliary_loss:
                aux_weight_dict = {}
                for i in range(self.config.decoder_layers - 1):
                    aux_weight_dict.update({k + f'_{i}': v for (k, v) in weight_dict.items()})
                weight_dict.update(aux_weight_dict)
            loss = sum((loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict))
        if not return_dict:
            if auxiliary_outputs is not None:
                output = (logits, pred_boxes, pred_masks) + auxiliary_outputs + decoder_outputs + encoder_outputs
            else:
                output = (logits, pred_boxes, pred_masks) + decoder_outputs + encoder_outputs
            return (loss, loss_dict) + output if loss is not None else output
        return ConditionalDetrSegmentationOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, pred_masks=pred_masks, auxiliary_outputs=auxiliary_outputs, last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)

def _expand(tensor, length: int):
    return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)
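
# Minimal, hypothetical shape check for _expand: a (batch, channels, height, width) tensor is
# repeated once per query and folded into the batch dimension, which is how the projected
# feature map is aligned with the per-query attention maps in the mask head. Toy sizes only.
def _example_expand_shapes():
    import torch

    feature_map = torch.zeros(2, 8, 16, 16)    # (batch, channels, height, width)
    expanded = _expand(feature_map, length=5)  # repeat for 5 queries
    assert expanded.shape == (10, 8, 16, 16)   # (batch * num_queries, channels, height, width)
    return expanded.shape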

class ConditionalDetrMaskHeadSmallConv(nn.Module):

    def __init__(self, dim, fpn_dims, context_dim):
        super().__init__()
        if dim % 8 != 0:
            raise ValueError('The hidden_size + number of attention heads must be divisible by 8 as the number of groups in GroupNorm is set to 8')
        inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64]
        self.lay1 = nn.Conv2d(dim, dim, 3, padding=1)
        self.gn1 = nn.GroupNorm(8, dim)
        self.lay2 = nn.Conv2d(dim, inter_dims[1], 3, padding=1)
        self.gn2 = nn.GroupNorm(min(8, inter_dims[1]), inter_dims[1])
        self.lay3 = nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
        self.gn3 = nn.GroupNorm(min(8, inter_dims[2]), inter_dims[2])
        self.lay4 = nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
        self.gn4 = nn.GroupNorm(min(8, inter_dims[3]), inter_dims[3])
        self.lay5 = nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
        self.gn5 = nn.GroupNorm(min(8, inter_dims[4]), inter_dims[4])
        self.out_lay = nn.Conv2d(inter_dims[4], 1, 3, padding=1)
        self.dim = dim
        self.adapter1 = nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
        self.adapter2 = nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
        self.adapter3 = nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]):
        x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)
        x = self.lay1(x)
        x = self.gn1(x)
        x = nn.functional.relu(x)
        x = self.lay2(x)
        x = self.gn2(x)
        x = nn.functional.relu(x)
        cur_fpn = self.adapter1(fpns[0])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
        x = self.lay3(x)
        x = self.gn3(x)
        x = nn.functional.relu(x)
        cur_fpn = self.adapter2(fpns[1])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
        x = self.lay4(x)
        x = self.gn4(x)
        x = nn.functional.relu(x)
        cur_fpn = self.adapter3(fpns[2])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
        x = self.lay5(x)
        x = self.gn5(x)
        x = nn.functional.relu(x)
        x = self.out_lay(x)
        return x

class ConditionalDetrMHAttentionMap(nn.Module):

    def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None):
        super().__init__()
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.dropout = nn.Dropout(dropout)
        self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
        self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
        self.normalize_fact = float(hidden_dim / self.num_heads) ** (-0.5)

    def forward(self, q, k, mask: Optional[Tensor]=None):
        q = self.q_linear(q)
        k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias)
        queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads)
        keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1])
        weights = torch.einsum('bqnc,bnchw->bqnhw', queries_per_head * self.normalize_fact, keys_per_head)
        if mask is not None:
            weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min)
        weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size())
        weights = self.dropout(weights)
        return weights

def dice_loss(inputs, targets, num_boxes):
    inputs = inputs.sigmoid()
    inputs = inputs.flatten(1)
    numerator = 2 * (inputs * targets).sum(1)
    denominator = inputs.sum(-1) + targets.sum(-1)
    loss = 1 - (numerator + 1) / (denominator + 1)
    return loss.sum() / num_boxes

def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float=0.25, gamma: float=2):
    prob = inputs.sigmoid()
    ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
    p_t = prob * targets + (1 - prob) * (1 - targets)
    loss = ce_loss * (1 - p_t) ** gamma
    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss
    return loss.mean(1).sum() / num_boxes
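
# Toy numerical sketch (hypothetical helper, toy values): with gamma = 2 the focal term
# down-weights well-classified positions, while dice_loss measures per-mask overlap after a
# sigmoid. The shapes below mimic (num_matched_boxes, num_pixels) mask logits.
def _example_focal_and_dice_loss():
    import torch

    logits = torch.tensor([[4.0, -4.0, 0.0], [2.0, 2.0, -2.0]])  # confident / mixed predictions
    targets = torch.tensor([[1.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
    num_boxes = logits.shape[0]

    focal = sigmoid_focal_loss(logits, targets, num_boxes, alpha=0.25, gamma=2)
    dice = dice_loss(logits, targets, num_boxes)
    return focal.item(), dice.item()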

class ConditionalDetrLoss(nn.Module):

    def __init__(self, matcher, num_classes, focal_alpha, losses):
        super().__init__()
        self.matcher = matcher
        self.num_classes = num_classes
        self.focal_alpha = focal_alpha
        self.losses = losses

    def loss_labels(self, outputs, targets, indices, num_boxes):
        if 'logits' not in outputs:
            raise KeyError('No logits were found in the outputs')
        source_logits = outputs['logits']
        idx = self._get_source_permutation_idx(indices)
        target_classes_o = torch.cat([t['class_labels'][J] for (t, (_, J)) in zip(targets, indices)])
        target_classes = torch.full(source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device)
        target_classes[idx] = target_classes_o
        target_classes_onehot = torch.zeros([source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device)
        target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
        target_classes_onehot = target_classes_onehot[:, :, :-1]
        loss_ce = sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1]
        losses = {'loss_ce': loss_ce}
        return losses

    @torch.no_grad()
    def loss_cardinality(self, outputs, targets, indices, num_boxes):
        logits = outputs['logits']
        device = logits.device
        target_lengths = torch.as_tensor([len(v['class_labels']) for v in targets], device=device)
        card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
        card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
        losses = {'cardinality_error': card_err}
        return losses

    def loss_boxes(self, outputs, targets, indices, num_boxes):
        if 'pred_boxes' not in outputs:
            raise KeyError('No predicted boxes found in outputs')
        idx = self._get_source_permutation_idx(indices)
        source_boxes = outputs['pred_boxes'][idx]
        target_boxes = torch.cat([t['boxes'][i] for (t, (_, i)) in zip(targets, indices)], dim=0)
        loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction='none')
        losses = {}
        losses['loss_bbox'] = loss_bbox.sum() / num_boxes
        loss_giou = 1 - torch.diag(generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)))
        losses['loss_giou'] = loss_giou.sum() / num_boxes
        return losses

    def loss_masks(self, outputs, targets, indices, num_boxes):
        if 'pred_masks' not in outputs:
            raise KeyError('No predicted masks found in outputs')
        source_idx = self._get_source_permutation_idx(indices)
        target_idx = self._get_target_permutation_idx(indices)
        source_masks = outputs['pred_masks']
        source_masks = source_masks[source_idx]
        masks = [t['masks'] for t in targets]
        (target_masks, valid) = nested_tensor_from_tensor_list(masks).decompose()
        target_masks = target_masks.to(source_masks)
        target_masks = target_masks[target_idx]
        source_masks = nn.functional.interpolate(source_masks[:, None], size=target_masks.shape[-2:], mode='bilinear', align_corners=False)
        source_masks = source_masks[:, 0].flatten(1)
        target_masks = target_masks.flatten(1)
        target_masks = target_masks.view(source_masks.shape)
        losses = {'loss_mask': sigmoid_focal_loss(source_masks, target_masks, num_boxes), 'loss_dice': dice_loss(source_masks, target_masks, num_boxes)}
        return losses

    def _get_source_permutation_idx(self, indices):
        batch_idx = torch.cat([torch.full_like(source, i) for (i, (source, _)) in enumerate(indices)])
        source_idx = torch.cat([source for (source, _) in indices])
        return (batch_idx, source_idx)

    def _get_target_permutation_idx(self, indices):
        batch_idx = torch.cat([torch.full_like(target, i) for (i, (_, target)) in enumerate(indices)])
        target_idx = torch.cat([target for (_, target) in indices])
        return (batch_idx, target_idx)

    def get_loss(self, loss, outputs, targets, indices, num_boxes):
        loss_map = {'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks}
        if loss not in loss_map:
            raise ValueError(f'Loss {loss} not supported')
        return loss_map[loss](outputs, targets, indices, num_boxes)

    def forward(self, outputs, targets):
        outputs_without_aux = {k: v for (k, v) in outputs.items() if k != 'auxiliary_outputs'}
        indices = self.matcher(outputs_without_aux, targets)
        num_boxes = sum((len(t['class_labels']) for t in targets))
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
        world_size = 1
        if is_accelerate_available():
            if PartialState._shared_state != {}:
                num_boxes = reduce(num_boxes)
                world_size = PartialState().num_processes
        num_boxes = torch.clamp(num_boxes / world_size, min=1).item()
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
        if 'auxiliary_outputs' in outputs:
            for (i, auxiliary_outputs) in enumerate(outputs['auxiliary_outputs']):
                indices = self.matcher(auxiliary_outputs, targets)
                for loss in self.losses:
                    if loss == 'masks':
                        continue
                    l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
                    l_dict = {k + f'_{i}': v for (k, v) in l_dict.items()}
                    losses.update(l_dict)
        return losses
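
# Illustrative, hypothetical sketch of the target format ConditionalDetrLoss expects: one dict
# per image with integer "class_labels" and normalized (center_x, center_y, width, height)
# "boxes". All sizes and cost weights below are toy values chosen for demonstration.
def _example_conditional_detr_loss():
    import torch

    num_classes, num_queries, batch_size = 3, 5, 1
    matcher = ConditionalDetrHungarianMatcher(class_cost=2, bbox_cost=5, giou_cost=2)
    criterion = ConditionalDetrLoss(
        matcher=matcher,
        num_classes=num_classes,
        focal_alpha=0.25,
        losses=["labels", "boxes", "cardinality"],
    )

    outputs = {
        "logits": torch.randn(batch_size, num_queries, num_classes),
        "pred_boxes": torch.rand(batch_size, num_queries, 4),
    }
    targets = [
        {
            "class_labels": torch.tensor([0, 2]),
            "boxes": torch.tensor([[0.5, 0.5, 0.2, 0.2], [0.3, 0.7, 0.1, 0.4]]),
        }
    ]

    # Returns a dict with "loss_ce", "loss_bbox", "loss_giou" and "cardinality_error".
    return criterion(outputs, targets)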

class ConditionalDetrMLPPredictionHead(nn.Module):

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList((nn.Linear(n, k) for (n, k) in zip([input_dim] + h, h + [output_dim])))

    def forward(self, x):
        for (i, layer) in enumerate(self.layers):
            x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x

class ConditionalDetrHungarianMatcher(nn.Module):

    def __init__(self, class_cost: float=1, bbox_cost: float=1, giou_cost: float=1):
        super().__init__()
        requires_backends(self, ['scipy'])
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        if class_cost == 0 and bbox_cost == 0 and (giou_cost == 0):
            raise ValueError("All costs of the Matcher can't be 0")

    @torch.no_grad()
    def forward(self, outputs, targets):
        (batch_size, num_queries) = outputs['logits'].shape[:2]
        out_prob = outputs['logits'].flatten(0, 1).sigmoid()
        out_bbox = outputs['pred_boxes'].flatten(0, 1)
        target_ids = torch.cat([v['class_labels'] for v in targets])
        target_bbox = torch.cat([v['boxes'] for v in targets])
        alpha = 0.25
        gamma = 2.0
        neg_cost_class = (1 - alpha) * out_prob ** gamma * -(1 - out_prob + 1e-08).log()
        pos_cost_class = alpha * (1 - out_prob) ** gamma * -(out_prob + 1e-08).log()
        class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids]
        bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
        giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
        sizes = [len(v['boxes']) for v in targets]
        indices = [linear_sum_assignment(c[i]) for (i, c) in enumerate(cost_matrix.split(sizes, -1))]
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for (i, j) in indices]
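
# Illustrative, hypothetical sketch of the matcher's output: for every image it returns a pair
# of index tensors (query_indices, target_indices) describing a one-to-one assignment. Toy
# sizes only; scipy must be installed for linear_sum_assignment.
def _example_hungarian_matcher():
    import torch

    matcher = ConditionalDetrHungarianMatcher(class_cost=2, bbox_cost=5, giou_cost=2)
    outputs = {
        "logits": torch.randn(1, 4, 3),     # (batch, num_queries, num_labels)
        "pred_boxes": torch.rand(1, 4, 4),  # normalized (cx, cy, w, h)
    }
    targets = [{"class_labels": torch.tensor([1]), "boxes": torch.tensor([[0.5, 0.5, 0.4, 0.4]])}]

    (query_idx, target_idx) = matcher(outputs, targets)[0]
    # One ground-truth box, so exactly one query is selected: len(query_idx) == len(target_idx) == 1.
    return query_idx, target_idx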

def _upcast(t: Tensor) -> Tensor:
    if t.is_floating_point():
        return t if t.dtype in (torch.float32, torch.float64) else t.float()
    else:
        return t if t.dtype in (torch.int32, torch.int64) else t.int()

def box_area(boxes: Tensor) -> Tensor:
    boxes = _upcast(boxes)
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

def box_iou(boxes1, boxes2):
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)
    left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2])
    right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])
    width_height = (right_bottom - left_top).clamp(min=0)
    inter = width_height[:, :, 0] * width_height[:, :, 1]
    union = area1[:, None] + area2 - inter
    iou = inter / union
    return (iou, union)

def generalized_box_iou(boxes1, boxes2):
    if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
        raise ValueError(f'boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}')
    if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
        raise ValueError(f'boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}')
    (iou, union) = box_iou(boxes1, boxes2)
    top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    width_height = (bottom_right - top_left).clamp(min=0)
    area = width_height[:, :, 0] * width_height[:, :, 1]
    return iou - (area - union) / area
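
# Worked example (hypothetical helper, toy boxes): two unit-offset 2x2 squares in corner
# format. Intersection = 1, union = 7, so IoU = 1/7; the smallest enclosing box has area 9,
# giving GIoU = 1/7 - (9 - 7)/9 ≈ -0.079.
def _example_generalized_box_iou():
    import torch

    boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
    boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
    giou = generalized_box_iou(boxes1, boxes2)  # shape (1, 1)
    return giou.item()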

def _max_by_axis(the_list):
    maxes = the_list[0]
    for sublist in the_list[1:]:
        for (index, item) in enumerate(sublist):
            maxes[index] = max(maxes[index], item)
    return maxes

class NestedTensor:

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        cast_tensor = self.tensors.to(device)
        mask = self.mask
        if mask is not None:
            cast_mask = mask.to(device)
        else:
            cast_mask = None
        return NestedTensor(cast_tensor, cast_mask)

    def decompose(self):
        return (self.tensors, self.mask)

    def __repr__(self):
        return str(self.tensors)

def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    if tensor_list[0].ndim == 3:
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        batch_shape = [len(tensor_list)] + max_size
        (batch_size, num_channels, height, width) = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device)
        for (img, pad_img, m) in zip(tensor_list, tensor, mask):
            pad_img[:img.shape[0], :img.shape[1], :img.shape[2]].copy_(img)
            m[:img.shape[1], :img.shape[2]] = False
    else:
        raise ValueError('Only 3-dimensional tensors are supported')
    return NestedTensor(tensor, mask)
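
# Illustrative, hypothetical sketch of batching two differently sized images: every image is
# zero-padded to the per-batch maximum size and the boolean mask marks padded positions with
# True. Toy image sizes only.
def _example_nested_tensor():
    import torch

    images = [torch.rand(3, 32, 48), torch.rand(3, 40, 40)]
    nested = nested_tensor_from_tensor_list(images)
    padded, mask = nested.decompose()
    assert padded.shape == (2, 3, 40, 48)     # padded to max height 40, max width 48
    assert mask.shape == (2, 40, 48)
    assert bool(mask[0, 39, 47])              # padding region of the first (smaller) image
    assert not bool(mask[0, 31, 47])          # valid pixel of the first image
    return padded.shape, mask.shape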

# File: transformers-main/src/transformers/models/convbert/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_convbert': ['ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer']}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_convbert'] = ['ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_convbert'] = ['TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_convbert import ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/convbert/configuration_convbert.py
""""""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class ConvBertConfig(PretrainedConfig):
    model_type = 'convbert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout

class ConvBertOnnxConfig(OnnxConfig):

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
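
# Illustrative, hypothetical configuration sketch: instantiating ConvBertConfig with no
# overrides yields the defaults above (roughly the YituTech/conv-bert-base architecture),
# and a model built from it starts with randomly initialized weights.
def _example_convbert_config():
    from transformers import ConvBertConfig, ConvBertModel

    configuration = ConvBertConfig(head_ratio=2, conv_kernel_size=9, num_groups=1)
    model = ConvBertModel(configuration)
    return model.config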

# File: transformers-main/src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py
""""""
import argparse
from transformers import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert
from transformers.utils import logging
logging.set_verbosity_info()

def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path):
    conf = ConvBertConfig.from_json_file(convbert_config_file)
    model = ConvBertModel(conf)
    model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path)
    model.save_pretrained(pytorch_dump_path)
    tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True)
    tf_model.save_pretrained(pytorch_dump_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint.')
    parser.add_argument('--convbert_config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained ConvBERT model. \nThis specifies the model architecture.')
    parser.add_argument('--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_orig_tf1_checkpoint_to_pytorch(args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path)
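
# Example invocation (illustrative only; all paths below are placeholders):
#
#   python convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py \
#       --tf_checkpoint_path /path/to/tf1_checkpoint \
#       --convbert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir
#
# The script first materializes a PyTorch ConvBertModel from the TF1 variables and then
# round-trips it through TFConvBertModel so both frameworks are saved to the same directory.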

# File: transformers-main/src/transformers/models/convbert/modeling_convbert.py
""""""
import math
import os
from operator import attrgetter
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN, get_activation
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...modeling_utils import PreTrainedModel, SequenceSummary
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_convbert import ConvBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = 'YituTech/conv-bert-base'
_CONFIG_FOR_DOC = 'ConvBertConfig'

def load_tf_weights_in_convbert(model, config, tf_checkpoint_path):
    try:
        import tensorflow as tf
    except ImportError:
        logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.')
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f'Converting TensorFlow checkpoint from {tf_path}')
    init_vars = tf.train.list_variables(tf_path)
    tf_data = {}
    for (name, shape) in init_vars:
        logger.info(f'Loading TF weight {name} with shape {shape}')
        array = tf.train.load_variable(tf_path, name)
        tf_data[name] = array
    param_mapping = {'embeddings.word_embeddings.weight': 'electra/embeddings/word_embeddings', 'embeddings.position_embeddings.weight': 'electra/embeddings/position_embeddings', 'embeddings.token_type_embeddings.weight': 'electra/embeddings/token_type_embeddings', 'embeddings.LayerNorm.weight': 'electra/embeddings/LayerNorm/gamma', 'embeddings.LayerNorm.bias': 'electra/embeddings/LayerNorm/beta', 'embeddings_project.weight': 'electra/embeddings_project/kernel', 'embeddings_project.bias': 'electra/embeddings_project/bias'}
    if config.num_groups > 1:
        group_dense_name = 'g_dense'
    else:
        group_dense_name = 'dense'
    for j in range(config.num_hidden_layers):
        param_mapping[f'encoder.layer.{j}.attention.self.query.weight'] = f'electra/encoder/layer_{j}/attention/self/query/kernel'
        param_mapping[f'encoder.layer.{j}.attention.self.query.bias'] = f'electra/encoder/layer_{j}/attention/self/query/bias'
        param_mapping[f'encoder.layer.{j}.attention.self.key.weight'] = f'electra/encoder/layer_{j}/attention/self/key/kernel'
        param_mapping[f'encoder.layer.{j}.attention.self.key.bias'] = f'electra/encoder/layer_{j}/attention/self/key/bias'
        param_mapping[f'encoder.layer.{j}.attention.self.value.weight'] = f'electra/encoder/layer_{j}/attention/self/value/kernel'
        param_mapping[f'encoder.layer.{j}.attention.self.value.bias'] = f'electra/encoder/layer_{j}/attention/self/value/bias'
        param_mapping[f'encoder.layer.{j}.attention.self.key_conv_attn_layer.depthwise.weight'] = f'electra/encoder/layer_{j}/attention/self/conv_attn_key/depthwise_kernel'
        param_mapping[f'encoder.layer.{j}.attention.self.key_conv_attn_layer.pointwise.weight'] = f'electra/encoder/layer_{j}/attention/self/conv_attn_key/pointwise_kernel'
        param_mapping[f'encoder.layer.{j}.attention.self.key_conv_attn_layer.bias'] = f'electra/encoder/layer_{j}/attention/self/conv_attn_key/bias'
        param_mapping[f'encoder.layer.{j}.attention.self.conv_kernel_layer.weight'] = f'electra/encoder/layer_{j}/attention/self/conv_attn_kernel/kernel'
        param_mapping[f'encoder.layer.{j}.attention.self.conv_kernel_layer.bias'] = f'electra/encoder/layer_{j}/attention/self/conv_attn_kernel/bias'
        param_mapping[f'encoder.layer.{j}.attention.self.conv_out_layer.weight'] = f'electra/encoder/layer_{j}/attention/self/conv_attn_point/kernel'
        param_mapping[f'encoder.layer.{j}.attention.self.conv_out_layer.bias'] = f'electra/encoder/layer_{j}/attention/self/conv_attn_point/bias'
        param_mapping[f'encoder.layer.{j}.attention.output.dense.weight'] = f'electra/encoder/layer_{j}/attention/output/dense/kernel'
        param_mapping[f'encoder.layer.{j}.attention.output.LayerNorm.weight'] = f'electra/encoder/layer_{j}/attention/output/LayerNorm/gamma'
        param_mapping[f'encoder.layer.{j}.attention.output.dense.bias'] = f'electra/encoder/layer_{j}/attention/output/dense/bias'
        param_mapping[f'encoder.layer.{j}.attention.output.LayerNorm.bias'] = f'electra/encoder/layer_{j}/attention/output/LayerNorm/beta'
        param_mapping[f'encoder.layer.{j}.intermediate.dense.weight'] = f'electra/encoder/layer_{j}/intermediate/{group_dense_name}/kernel'
        param_mapping[f'encoder.layer.{j}.intermediate.dense.bias'] = f'electra/encoder/layer_{j}/intermediate/{group_dense_name}/bias'
        param_mapping[f'encoder.layer.{j}.output.dense.weight'] = f'electra/encoder/layer_{j}/output/{group_dense_name}/kernel'
        param_mapping[f'encoder.layer.{j}.output.dense.bias'] = f'electra/encoder/layer_{j}/output/{group_dense_name}/bias'
        param_mapping[f'encoder.layer.{j}.output.LayerNorm.weight'] = f'electra/encoder/layer_{j}/output/LayerNorm/gamma'
        param_mapping[f'encoder.layer.{j}.output.LayerNorm.bias'] = f'electra/encoder/layer_{j}/output/LayerNorm/beta'
    for param in model.named_parameters():
        param_name = param[0]
        retriever = attrgetter(param_name)
        result = retriever(model)
        tf_name = param_mapping[param_name]
        value = torch.from_numpy(tf_data[tf_name])
        logger.info(f'TF: {tf_name}, PT: {param_name} ')
        if tf_name.endswith('/kernel'):
            if not tf_name.endswith('/intermediate/g_dense/kernel'):
                if not tf_name.endswith('/output/g_dense/kernel'):
                    value = value.T
        if tf_name.endswith('/depthwise_kernel'):
            value = value.permute(1, 2, 0)
        if tf_name.endswith('/pointwise_kernel'):
            value = value.permute(2, 1, 0)
        if tf_name.endswith('/conv_attn_key/bias'):
            value = value.unsqueeze(-1)
        result.data = value
    return model

class ConvBertEmbeddings(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
        self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)

    def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.LongTensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]
        if token_type_ids is None:
            if hasattr(self, 'token_type_ids'):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

class ConvBertPreTrainedModel(PreTrainedModel):
    config_class = ConvBertConfig
    load_tf_weights = load_tf_weights_in_convbert
    base_model_prefix = 'convbert'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

class SeparableConv1D(nn.Module):

    def __init__(self, config, input_filters, output_filters, kernel_size, **kwargs):
        super().__init__()
        self.depthwise = nn.Conv1d(input_filters, input_filters, kernel_size=kernel_size, groups=input_filters, padding=kernel_size // 2, bias=False)
        self.pointwise = nn.Conv1d(input_filters, output_filters, kernel_size=1, bias=False)
        self.bias = nn.Parameter(torch.zeros(output_filters, 1))
        self.depthwise.weight.data.normal_(mean=0.0, std=config.initializer_range)
        self.pointwise.weight.data.normal_(mean=0.0, std=config.initializer_range)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self.depthwise(hidden_states)
        x = self.pointwise(x)
        x += self.bias
        return x
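
# Shape sketch (hypothetical helper, toy sizes): the depthwise convolution mixes information
# along the sequence axis one channel at a time, and the 1x1 pointwise convolution then mixes
# channels, so a (batch, input_filters, seq_len) tensor comes out as (batch, output_filters, seq_len).
def _example_separable_conv1d():
    import torch

    config = ConvBertConfig()
    conv = SeparableConv1D(config, input_filters=768, output_filters=384, kernel_size=9)
    hidden_states = torch.randn(2, 768, 16)  # channels-first, as used in ConvBertSelfAttention
    output = conv(hidden_states)
    assert output.shape == (2, 384, 16)
    return output.shape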

class ConvBertSelfAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        new_num_attention_heads = config.num_attention_heads // config.head_ratio
        if new_num_attention_heads < 1:
            self.head_ratio = config.num_attention_heads
            self.num_attention_heads = 1
        else:
            self.num_attention_heads = new_num_attention_heads
            self.head_ratio = config.head_ratio
        self.conv_kernel_size = config.conv_kernel_size
        if config.hidden_size % self.num_attention_heads != 0:
            raise ValueError('hidden_size should be divisible by num_attention_heads')
        self.attention_head_size = config.hidden_size // self.num_attention_heads // 2
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.key_conv_attn_layer = SeparableConv1D(config, config.hidden_size, self.all_head_size, self.conv_kernel_size)
        self.conv_kernel_layer = nn.Linear(self.all_head_size, self.num_attention_heads * self.conv_kernel_size)
        self.conv_out_layer = nn.Linear(config.hidden_size, self.all_head_size)
        self.unfold = nn.Unfold(kernel_size=[self.conv_kernel_size, 1], padding=[int((self.conv_kernel_size - 1) / 2), 0])
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)
        batch_size = hidden_states.size(0)
        if encoder_hidden_states is not None:
            mixed_key_layer = self.key(encoder_hidden_states)
            mixed_value_layer = self.value(encoder_hidden_states)
        else:
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)
        mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states.transpose(1, 2))
        mixed_key_conv_attn_layer = mixed_key_conv_attn_layer.transpose(1, 2)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        conv_attn_layer = torch.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
        conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
        conv_kernel_layer = torch.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
        conv_kernel_layer = torch.softmax(conv_kernel_layer, dim=1)
        conv_out_layer = self.conv_out_layer(hidden_states)
        conv_out_layer = torch.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
        conv_out_layer = conv_out_layer.transpose(1, 2).contiguous().unsqueeze(-1)
        conv_out_layer = nn.functional.unfold(conv_out_layer, kernel_size=[self.conv_kernel_size, 1], dilation=1, padding=[(self.conv_kernel_size - 1) // 2, 0], stride=1)
        conv_out_layer = conv_out_layer.transpose(1, 2).reshape(batch_size, -1, self.all_head_size, self.conv_kernel_size)
        conv_out_layer = torch.reshape(conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
        conv_out_layer = torch.matmul(conv_out_layer, conv_kernel_layer)
        conv_out_layer = torch.reshape(conv_out_layer, [-1, self.all_head_size])
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        conv_out = torch.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
        context_layer = torch.cat([context_layer, conv_out], 2)
        new_context_layer_shape = context_layer.size()[:-2] + (self.num_attention_heads * self.attention_head_size * 2,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
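
# Shape sketch (hypothetical helper, default config): with hidden_size=768, 12 attention heads
# and head_ratio=2, half of the heads are replaced by span-based dynamic convolution, so the
# layer runs 6 self-attention heads and 6 convolution heads of size 64 each, and the
# concatenated context layer comes back to hidden_size.
def _example_convbert_self_attention_shapes():
    import torch

    config = ConvBertConfig()
    attention = ConvBertSelfAttention(config)
    hidden_states = torch.randn(2, 16, config.hidden_size)  # (batch, seq_len, hidden_size)
    (context_layer,) = attention(hidden_states)
    assert context_layer.shape == (2, 16, config.hidden_size)
    return context_layer.shape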

class ConvBertSelfOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

class ConvBertAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.self = ConvBertSelfAttention(config)
        self.output = ConvBertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.FloatTensor]]:
        self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]
        return outputs

class GroupedLinearLayer(nn.Module):

    def __init__(self, input_size, output_size, num_groups):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.num_groups = num_groups
        self.group_in_dim = self.input_size // self.num_groups
        self.group_out_dim = self.output_size // self.num_groups
        self.weight = nn.Parameter(torch.empty(self.num_groups, self.group_in_dim, self.group_out_dim))
        self.bias = nn.Parameter(torch.empty(output_size))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        batch_size = list(hidden_states.size())[0]
        x = torch.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim])
        x = x.permute(1, 0, 2)
        x = torch.matmul(x, self.weight)
        x = x.permute(1, 0, 2)
        x = torch.reshape(x, [batch_size, -1, self.output_size])
        x = x + self.bias
        return x
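
# Illustrative sketch, not part of the original modeling file: GroupedLinearLayer applies an
# independent (group_in_dim -> group_out_dim) projection to each of `num_groups` contiguous
# slices of the feature dimension, i.e. a block-diagonal linear layer. The helper below is a
# hypothetical equivalence check on a toy configuration; weights are initialized explicitly
# here because the layer itself leaves initialization to the model's init scheme.
def _grouped_linear_equivalence_sketch():
    layer = GroupedLinearLayer(input_size=8, output_size=8, num_groups=2)
    nn.init.normal_(layer.weight)
    nn.init.zeros_(layer.bias)
    x = torch.randn(3, 5, 8)
    # Group g maps input slice [4*g:4*(g+1)] through weight[g] into output slice [4*g:4*(g+1)].
    manual = torch.cat([x[..., :4] @ layer.weight[0], x[..., 4:] @ layer.weight[1]], dim=-1) + layer.bias
    assert torch.allclose(layer(x), manual, atol=1e-6)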

class ConvBertIntermediate(nn.Module):

    def __init__(self, config):
        super().__init__()
        if config.num_groups == 1:
            self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        else:
            self.dense = GroupedLinearLayer(input_size=config.hidden_size, output_size=config.intermediate_size, num_groups=config.num_groups)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

class ConvBertOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        if config.num_groups == 1:
            self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        else:
            self.dense = GroupedLinearLayer(input_size=config.intermediate_size, output_size=config.hidden_size, num_groups=config.num_groups)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

class ConvBertLayer(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ConvBertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise TypeError(f'{self} should be used as a decoder model if cross attention is added')
            self.crossattention = ConvBertAttention(config)
        self.intermediate = ConvBertIntermediate(config)
        self.output = ConvBertOutput(config)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.FloatTensor]]:
        self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, 'crossattention'):
                raise AttributeError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
            cross_attention_outputs = self.crossattention(attention_output, encoder_attention_mask, head_mask, encoder_hidden_states, output_attentions)
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:]
        layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
        outputs = (layer_output,) + outputs
        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output

class ConvBertEncoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ConvBertLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        for (i, layer_module) in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions)
            else:
                layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
        return BaseModelOutputWithCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)

class ConvBertPredictionHeadTransform(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states
CONVBERT_START_DOCSTRING = '\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
CONVBERT_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`torch.LongTensor` of shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n            1]`:\n\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.max_position_embeddings - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"

@add_start_docstrings('The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.', CONVBERT_START_DOCSTRING)
class ConvBertModel(ConvBertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = ConvBertEmbeddings(config)
        if config.embedding_size != config.hidden_size:
            self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)
        self.encoder = ConvBertEncoder(config)
        self.config = config
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for (layer, heads) in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        (batch_size, seq_length) = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            if hasattr(self.embeddings, 'token_type_ids'):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        if hasattr(self, 'embeddings_project'):
            hidden_states = self.embeddings_project(hidden_states)
        hidden_states = self.encoder(hidden_states, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        return hidden_states
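
# Minimal usage sketch (not part of the original file); the checkpoint matches the
# `_CHECKPOINT_FOR_DOC` referenced by the docstring decorators above. The helper name is
# hypothetical and the import is done lazily to avoid a circular import at module load time.
def _convbert_model_usage_sketch():
    from transformers import AutoTokenizer, ConvBertModel

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")
    inputs = tokenizer("ConvBERT mixes self-attention with span-based dynamic convolution.", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.last_hidden_state  # [batch_size, sequence_length, hidden_size]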

class ConvBertGeneratorPredictions(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.activation = get_activation('gelu')
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.dense = nn.Linear(config.hidden_size, config.embedding_size)

    def forward(self, generator_hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.dense(generator_hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states

@add_start_docstrings('ConvBERT Model with a `language modeling` head on top.', CONVBERT_START_DOCSTRING)
class ConvBertForMaskedLM(ConvBertPreTrainedModel):
    _tied_weights_keys = ['generator.lm_head.weight']

    def __init__(self, config):
        super().__init__(config)
        self.convbert = ConvBertModel(config)
        self.generator_predictions = ConvBertGeneratorPredictions(config)
        self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
        self.post_init()

    def get_output_embeddings(self):
        return self.generator_lm_head

    def set_output_embeddings(self, word_embeddings):
        self.generator_lm_head = word_embeddings

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MaskedLMOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        generator_hidden_states = self.convbert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict)
        generator_sequence_output = generator_hidden_states[0]
        prediction_scores = self.generator_predictions(generator_sequence_output)
        prediction_scores = self.generator_lm_head(prediction_scores)
        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + generator_hidden_states[1:]
            return (loss,) + output if loss is not None else output
        return MaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions)
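
# Hedged fill-mask sketch (not part of the original file): positions whose label is -100 are
# ignored by the default CrossEntropyLoss used above, so only masked positions need real labels
# during training. The helper name is hypothetical; only inference is shown here.
def _convbert_mlm_usage_sketch():
    from transformers import AutoTokenizer, ConvBertForMaskedLM

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = ConvBertForMaskedLM.from_pretrained("YituTech/conv-bert-base")
    inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
    return tokenizer.decode(logits[0, mask_index].argmax(dim=-1))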

class ConvBertClassificationHead(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
        self.config = config

    def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
        x = hidden_states[:, 0, :]
        x = self.dropout(x)
        x = self.dense(x)
        x = ACT2FN[self.config.hidden_act](x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x
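
# Note (not part of the original file): the head above pools by taking the hidden state of the
# first token (the [CLS] position); ConvBERT has no separate pooler module, so dropout ->
# dense -> activation -> dropout -> out_proj is applied directly to that vector.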

@add_start_docstrings('\n    ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n    pooled output) e.g. for GLUE tasks.\n    ', CONVBERT_START_DOCSTRING)
class ConvBertForSequenceClassification(ConvBertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.convbert = ConvBertModel(config)
        self.classifier = ConvBertClassificationHead(config)
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.convbert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
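
# Note on the loss selection above (not part of the original file): when `config.problem_type`
# is unset it is inferred at the first forward call that receives labels:
#   num_labels == 1                         -> "regression"                  (MSELoss)
#   num_labels  > 1 and integer labels      -> "single_label_classification" (CrossEntropyLoss)
#   otherwise (e.g. float multi-hot labels) -> "multi_label_classification"  (BCEWithLogitsLoss)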

@add_start_docstrings('\n    ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n    softmax) e.g. for RocStories/SWAG tasks.\n    ', CONVBERT_START_DOCSTRING)
class ConvBertForMultipleChoice(ConvBertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.convbert = ConvBertModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MultipleChoiceModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
        outputs = self.convbert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        pooled_output = self.sequence_summary(sequence_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

@add_start_docstrings('\n    ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n    Named-Entity-Recognition (NER) tasks.\n    ', CONVBERT_START_DOCSTRING)
class ConvBertForTokenClassification(ConvBertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.convbert = ConvBertModel(config)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.convbert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

@add_start_docstrings('\n    ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear\n    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', CONVBERT_START_DOCSTRING)
class ConvBertForQuestionAnswering(ConvBertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.convbert = ConvBertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.convbert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        (start_logits, end_logits) = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return (total_loss,) + output if total_loss is not None else output
        return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
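
# Note on the span loss above (not part of the original file): start/end positions that fall
# outside the model input (for example when the answer was truncated away) are clamped to
# `ignored_index == sequence_length`, and CrossEntropyLoss(ignore_index=ignored_index) then
# excludes those examples from the loss instead of raising an out-of-range error.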

# File: transformers-main/src/transformers/models/convbert/modeling_tf_convbert.py
""""""
from __future__ import annotations
from typing import Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput
from ...modeling_tf_utils import TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_convbert import ConvBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = 'YituTech/conv-bert-base'
_CONFIG_FOR_DOC = 'ConvBertConfig'

class TFConvBertEmbeddings(keras.layers.Layer):

    def __init__(self, config: ConvBertConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedding_size = config.embedding_size
        self.max_position_embeddings = config.max_position_embeddings
        self.initializer_range = config.initializer_range
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)

    def build(self, input_shape=None):
        with tf.name_scope('word_embeddings'):
            self.weight = self.add_weight(name='weight', shape=[self.config.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range))
        with tf.name_scope('token_type_embeddings'):
            self.token_type_embeddings = self.add_weight(name='embeddings', shape=[self.config.type_vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range))
        with tf.name_scope('position_embeddings'):
            self.position_embeddings = self.add_weight(name='embeddings', shape=[self.max_position_embeddings, self.embedding_size], initializer=get_initializer(self.initializer_range))
        if self.built:
            return
        self.built = True
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.embedding_size])

    def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, token_type_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None, past_key_values_length=0, training: bool=False) -> tf.Tensor:
        if input_ids is None and inputs_embeds is None:
            raise ValueError('Need to provide either `input_ids` or `inputs_embeds`.')
        if input_ids is not None:
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
        input_shape = shape_list(inputs_embeds)[:-1]
        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)
        if position_ids is None:
            position_ids = tf.expand_dims(tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0)
        position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
        token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
        final_embeddings = inputs_embeds + position_embeds + token_type_embeds
        final_embeddings = self.LayerNorm(inputs=final_embeddings)
        final_embeddings = self.dropout(inputs=final_embeddings, training=training)
        return final_embeddings

class TFConvBertSelfAttention(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
        if new_num_attention_heads < 1:
            self.head_ratio = config.num_attention_heads
            num_attention_heads = 1
        else:
            num_attention_heads = new_num_attention_heads
            self.head_ratio = config.head_ratio
        self.num_attention_heads = num_attention_heads
        self.conv_kernel_size = config.conv_kernel_size
        if config.hidden_size % self.num_attention_heads != 0:
            raise ValueError('hidden_size should be divisible by num_attention_heads')
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='query')
        self.key = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='key')
        self.value = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='value')
        self.key_conv_attn_layer = keras.layers.SeparableConv1D(self.all_head_size, self.conv_kernel_size, padding='same', activation=None, depthwise_initializer=get_initializer(1 / self.conv_kernel_size), pointwise_initializer=get_initializer(config.initializer_range), name='key_conv_attn_layer')
        self.conv_kernel_layer = keras.layers.Dense(self.num_attention_heads * self.conv_kernel_size, activation=None, name='conv_kernel_layer', kernel_initializer=get_initializer(config.initializer_range))
        self.conv_out_layer = keras.layers.Dense(self.all_head_size, activation=None, name='conv_out_layer', kernel_initializer=get_initializer(config.initializer_range))
        self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
        self.config = config

    def transpose_for_scores(self, x, batch_size):
        x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
        batch_size = shape_list(hidden_states)[0]
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
        key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
        conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
        conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
        conv_kernel_layer = stable_softmax(conv_kernel_layer, axis=1)
        paddings = tf.constant([[0, 0], [int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)], [0, 0]])
        conv_out_layer = self.conv_out_layer(hidden_states)
        conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
        conv_out_layer = tf.pad(conv_out_layer, paddings, 'CONSTANT')
        unfold_conv_out_layer = tf.stack([tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size]) for i in range(self.conv_kernel_size)], axis=-1)
        conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
        conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer)
        conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size])
        attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
        dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype)
        attention_scores = attention_scores / tf.math.sqrt(dk)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        attention_probs = stable_softmax(attention_scores, axis=-1)
        attention_probs = self.dropout(attention_probs, training=training)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        value_layer = tf.reshape(mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
        value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
        context_layer = tf.matmul(attention_probs, value_layer)
        context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
        conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
        context_layer = tf.concat([context_layer, conv_out], 2)
        context_layer = tf.reshape(context_layer, (batch_size, -1, self.head_ratio * self.all_head_size))
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
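
    # Note (not part of the original file): the tf.pad + tf.stack(tf.slice(...)) block above
    # plays the role of torch.nn.functional.unfold in the PyTorch implementation: the sequence
    # is padded by (conv_kernel_size - 1) // 2 on each side and conv_kernel_size shifted views
    # of length `sequence_length` are stacked, yielding the same
    # [batch * seq * heads, head_size, kernel_size] windows to which the per-position dynamic
    # kernels are applied.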

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'query', None) is not None:
            with tf.name_scope(self.query.name):
                self.query.build([None, None, self.config.hidden_size])
        if getattr(self, 'key', None) is not None:
            with tf.name_scope(self.key.name):
                self.key.build([None, None, self.config.hidden_size])
        if getattr(self, 'value', None) is not None:
            with tf.name_scope(self.value.name):
                self.value.build([None, None, self.config.hidden_size])
        if getattr(self, 'key_conv_attn_layer', None) is not None:
            with tf.name_scope(self.key_conv_attn_layer.name):
                self.key_conv_attn_layer.build([None, None, self.config.hidden_size])
        if getattr(self, 'conv_kernel_layer', None) is not None:
            with tf.name_scope(self.conv_kernel_layer.name):
                self.conv_kernel_layer.build([None, None, self.all_head_size])
        if getattr(self, 'conv_out_layer', None) is not None:
            with tf.name_scope(self.conv_out_layer.name):
                self.conv_out_layer.build([None, None, self.config.hidden_size])

class TFConvBertSelfOutput(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
        self.config = config

    def call(self, hidden_states, input_tensor, training=False):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])

class TFConvBertAttention(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.self_attention = TFConvBertSelfAttention(config, name='self')
        self.dense_output = TFConvBertSelfOutput(config, name='output')

    def prune_heads(self, heads):
        raise NotImplementedError

    def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
        self_outputs = self.self_attention(input_tensor, attention_mask, head_mask, output_attentions, training=training)
        attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
        outputs = (attention_output,) + self_outputs[1:]
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'self_attention', None) is not None:
            with tf.name_scope(self.self_attention.name):
                self.self_attention.build(None)
        if getattr(self, 'dense_output', None) is not None:
            with tf.name_scope(self.dense_output.name):
                self.dense_output.build(None)

class GroupedLinearLayer(keras.layers.Layer):

    def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs):
        super().__init__(**kwargs)
        self.input_size = input_size
        self.output_size = output_size
        self.num_groups = num_groups
        self.kernel_initializer = kernel_initializer
        self.group_in_dim = self.input_size // self.num_groups
        self.group_out_dim = self.output_size // self.num_groups

    def build(self, input_shape=None):
        self.kernel = self.add_weight('kernel', shape=[self.group_out_dim, self.group_in_dim, self.num_groups], initializer=self.kernel_initializer, trainable=True)
        self.bias = self.add_weight('bias', shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True)
        super().build(input_shape)

    def call(self, hidden_states):
        batch_size = shape_list(hidden_states)[0]
        x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2])
        x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0]))
        x = tf.transpose(x, [1, 0, 2])
        x = tf.reshape(x, [batch_size, -1, self.output_size])
        x = tf.nn.bias_add(value=x, bias=self.bias)
        return x

class TFConvBertIntermediate(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        if config.num_groups == 1:
            self.dense = keras.layers.Dense(config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        else:
            self.dense = GroupedLinearLayer(config.hidden_size, config.intermediate_size, num_groups=config.num_groups, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act
        self.config = config

    def call(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])

class TFConvBertOutput(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        if config.num_groups == 1:
            self.dense = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        else:
            self.dense = GroupedLinearLayer(config.intermediate_size, config.hidden_size, num_groups=config.num_groups, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
        self.config = config

    def call(self, hidden_states, input_tensor, training=False):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.intermediate_size])

class TFConvBertLayer(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFConvBertAttention(config, name='attention')
        self.intermediate = TFConvBertIntermediate(config, name='intermediate')
        self.bert_output = TFConvBertOutput(config, name='output')

    def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
        attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions, training=training)
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.bert_output(intermediate_output, attention_output, training=training)
        outputs = (layer_output,) + attention_outputs[1:]
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'attention', None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, 'intermediate', None) is not None:
            with tf.name_scope(self.intermediate.name):
                self.intermediate.build(None)
        if getattr(self, 'bert_output', None) is not None:
            with tf.name_scope(self.bert_output.name):
                self.bert_output.build(None)

class TFConvBertEncoder(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.layer = [TFConvBertLayer(config, name=f'layer_._{i}') for i in range(config.num_hidden_layers)]

    def call(self, hidden_states, attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False):
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for (i, layer_module) in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i], output_attentions, training=training)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None))
        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'layer', None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)

class TFConvBertPredictionHeadTransform(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.config = config

    def call(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])

@keras_serializable
class TFConvBertMainLayer(keras.layers.Layer):
    config_class = ConvBertConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.embeddings = TFConvBertEmbeddings(config, name='embeddings')
        if config.embedding_size != config.hidden_size:
            self.embeddings_project = keras.layers.Dense(config.hidden_size, name='embeddings_project')
        self.encoder = TFConvBertEncoder(config, name='encoder')
        self.config = config

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings.weight = value
        self.embeddings.vocab_size = value.shape[0]

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    def get_extended_attention_mask(self, attention_mask, input_shape, dtype):
        if attention_mask is None:
            attention_mask = tf.fill(input_shape, 1)
        extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
        extended_attention_mask = tf.cast(extended_attention_mask, dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
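
    # Note (not part of the original file): the additive mask above maps attended positions
    # (mask value 1) to 0.0 and padded positions (mask value 0) to -10000.0, so after being
    # added to the attention scores the padded positions receive effectively zero softmax weight.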

    def get_head_mask(self, head_mask):
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.config.num_hidden_layers
        return head_mask

    @unpack_inputs
    def call(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        if attention_mask is None:
            attention_mask = tf.fill(input_shape, 1)
        if token_type_ids is None:
            token_type_ids = tf.fill(input_shape, 0)
        hidden_states = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training)
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, hidden_states.dtype)
        head_mask = self.get_head_mask(head_mask)
        if hasattr(self, 'embeddings_project'):
            hidden_states = self.embeddings_project(hidden_states, training=training)
        hidden_states = self.encoder(hidden_states, extended_attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'embeddings', None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, 'encoder', None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, 'embeddings_project', None) is not None:
            with tf.name_scope(self.embeddings_project.name):
                self.embeddings_project.build([None, None, self.config.embedding_size])

class TFConvBertPreTrainedModel(TFPreTrainedModel):
    config_class = ConvBertConfig
    base_model_prefix = 'convbert'
CONVBERT_START_DOCSTRING = '\n\n    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n    behavior.\n\n    \n\n    TensorFlow models and layers in `transformers` accept two formats as input:\n\n    - having all inputs as keyword arguments (like PyTorch models), or\n    - having all inputs as a list, tuple or dict in the first positional argument.\n\n    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n    positional argument:\n\n    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n    - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n    Note that when creating models and layers with\n    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n    about any of this, as you can just pass inputs like you would to any other Python function!\n\n    \n\n    Args:\n        config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
CONVBERT_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n            [`PreTrainedTokenizer.encode`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n            1]`:\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.max_position_embeddings - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n            config will be used instead.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n            used instead.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n            eager mode, in graph mode the value will always be set to True.\n        training (`bool`, *optional*, defaults to `False`):\n            Whether or not to use the model in training mode (some modules like dropout modules have different\n            behaviors between training and evaluation).\n"

@add_start_docstrings('The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.', CONVBERT_START_DOCSTRING)
class TFConvBertModel(TFConvBertPreTrainedModel):

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.convbert = TFConvBertMainLayer(config, name='convbert')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]]=None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]]=None, position_ids: Optional[Union[np.ndarray, tf.Tensor]]=None, head_mask: Optional[Union[np.ndarray, tf.Tensor]]=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        outputs = self.convbert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convbert', None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
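
# A minimal usage sketch for the bare model above; the "YituTech/conv-bert-base" checkpoint
# name is an assumption, not something defined in this file.
def _tf_convbert_model_usage_sketch():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
    outputs = model(**inputs)
    return outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)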

class TFConvBertMaskedLMHead(keras.layers.Layer):

    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedding_size = config.embedding_size
        self.input_embeddings = input_embeddings

    def build(self, input_shape):
        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer='zeros', trainable=True, name='bias')
        super().build(input_shape)

    def get_output_embeddings(self):
        return self.input_embeddings

    def set_output_embeddings(self, value):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self):
        return {'bias': self.bias}

    def set_bias(self, value):
        self.bias = value['bias']
        self.config.vocab_size = shape_list(value['bias'])[0]

    def call(self, hidden_states):
        seq_length = shape_list(tensor=hidden_states)[1]
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
        hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
        return hidden_states
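
# The LM head above ties its output projection to the input embedding matrix:
# logits = reshape(hidden) @ E^T + bias, with final shape (batch, seq_len, vocab_size).
# A standalone sketch of the same computation with hypothetical sizes:
def _tied_lm_head_sketch():
    batch_size, seq_length, embedding_size, vocab_size = 2, 4, 8, 16
    hidden = tf.random.normal((batch_size, seq_length, embedding_size))
    embedding_matrix = tf.random.normal((vocab_size, embedding_size))  # shared with the input embeddings
    bias = tf.zeros((vocab_size,))
    flat = tf.reshape(hidden, [-1, embedding_size])
    logits = tf.matmul(flat, embedding_matrix, transpose_b=True)
    logits = tf.reshape(logits, [-1, seq_length, vocab_size])
    return tf.nn.bias_add(logits, bias)  # (2, 4, 16)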

class TFConvBertGeneratorPredictions(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.dense = keras.layers.Dense(config.embedding_size, name='dense')
        self.config = config

    def call(self, generator_hidden_states, training=False):
        hidden_states = self.dense(generator_hidden_states)
        hidden_states = get_tf_activation('gelu')(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.embedding_size])
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])

@add_start_docstrings('ConvBERT Model with a `language modeling` head on top.', CONVBERT_START_DOCSTRING)
class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss):

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.config = config
        self.convbert = TFConvBertMainLayer(config, name='convbert')
        self.generator_predictions = TFConvBertGeneratorPredictions(config, name='generator_predictions')
        if isinstance(config.hidden_act, str):
            self.activation = get_tf_activation(config.hidden_act)
        else:
            self.activation = config.hidden_act
        self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name='generator_lm_head')

    def get_lm_head(self):
        return self.generator_lm_head

    def get_prefix_bias_name(self):
        return self.name + '/' + self.generator_lm_head.name

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple, TFMaskedLMOutput]:
        generator_hidden_states = self.convbert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        generator_sequence_output = generator_hidden_states[0]
        prediction_scores = self.generator_predictions(generator_sequence_output, training=training)
        prediction_scores = self.generator_lm_head(prediction_scores, training=training)
        loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
        if not return_dict:
            output = (prediction_scores,) + generator_hidden_states[1:]
            return (loss,) + output if loss is not None else output
        return TFMaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convbert', None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
        if getattr(self, 'generator_predictions', None) is not None:
            with tf.name_scope(self.generator_predictions.name):
                self.generator_predictions.build(None)
        if getattr(self, 'generator_lm_head', None) is not None:
            with tf.name_scope(self.generator_lm_head.name):
                self.generator_lm_head.build(None)
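
# A minimal fill-mask sketch for the masked-LM model above, assuming the
# "YituTech/conv-bert-base" checkpoint.
def _tf_convbert_masked_lm_sketch():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertForMaskedLM.from_pretrained("YituTech/conv-bert-base")
    inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
    logits = model(**inputs).logits
    mask_position = int(tf.argmax(tf.cast(inputs["input_ids"][0] == tokenizer.mask_token_id, tf.int32)))
    predicted_id = int(tf.argmax(logits[0, mask_position]))
    return tokenizer.decode([predicted_id])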

class TFConvBertClassificationHead(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = keras.layers.Dropout(classifier_dropout)
        self.out_proj = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='out_proj')
        self.config = config

    def call(self, hidden_states, **kwargs):
        x = hidden_states[:, 0, :]
        x = self.dropout(x)
        x = self.dense(x)
        x = get_tf_activation(self.config.hidden_act)(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, 'out_proj', None) is not None:
            with tf.name_scope(self.out_proj.name):
                self.out_proj.build([None, None, self.config.hidden_size])

@add_start_docstrings('\n    ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.\n    ', CONVBERT_START_DOCSTRING)
class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss):

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.convbert = TFConvBertMainLayer(config, name='convbert')
        self.classifier = TFConvBertClassificationHead(config, name='classifier')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple, TFSequenceClassifierOutput]:
        outputs = self.convbert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        logits = self.classifier(outputs[0], training=training)
        loss = None if labels is None else self.hf_compute_loss(labels, logits)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convbert', None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build(None)

@add_start_docstrings('\n    ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n    softmax) e.g. for RocStories/SWAG tasks.\n    ', CONVBERT_START_DOCSTRING)
class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss):

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.convbert = TFConvBertMainLayer(config, name='convbert')
        self.sequence_summary = TFSequenceSummary(config, initializer_range=config.initializer_range, name='sequence_summary')
        self.classifier = keras.layers.Dense(1, kernel_initializer=get_initializer(config.initializer_range), name='classifier')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple, TFMultipleChoiceModelOutput]:
        if input_ids is not None:
            num_choices = shape_list(input_ids)[1]
            seq_length = shape_list(input_ids)[2]
        else:
            num_choices = shape_list(inputs_embeds)[1]
            seq_length = shape_list(inputs_embeds)[2]
        flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
        flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
        flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
        flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
        flat_inputs_embeds = tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None
        outputs = self.convbert(flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training)
        logits = self.sequence_summary(outputs[0], training=training)
        logits = self.classifier(logits)
        reshaped_logits = tf.reshape(logits, (-1, num_choices))
        loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convbert', None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
        if getattr(self, 'sequence_summary', None) is not None:
            with tf.name_scope(self.sequence_summary.name):
                self.sequence_summary.build(None)
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.hidden_size])
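
# Sketch of the shape handling used in the multiple-choice head above, with hypothetical sizes:
# choices are folded into the batch before the encoder and unfolded again for the final scores.
def _multiple_choice_reshape_sketch():
    batch_size, num_choices, seq_length = 2, 4, 16
    input_ids = tf.zeros((batch_size, num_choices, seq_length), dtype=tf.int32)
    flat_input_ids = tf.reshape(input_ids, (-1, seq_length))  # (8, 16) as seen by the encoder
    per_choice_scores = tf.zeros((batch_size * num_choices, 1))  # classifier output: one score per choice
    reshaped_logits = tf.reshape(per_choice_scores, (-1, num_choices))  # (2, 4): one row per question
    return flat_input_ids.shape, reshaped_logits.shape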

@add_start_docstrings('\n    ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n    Named-Entity-Recognition (NER) tasks.\n    ', CONVBERT_START_DOCSTRING)
class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss):

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.convbert = TFConvBertMainLayer(config, name='convbert')
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = keras.layers.Dropout(classifier_dropout)
        self.classifier = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple, TFTokenClassifierOutput]:
        outputs = self.convbert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output, training=training)
        logits = self.classifier(sequence_output)
        loss = None if labels is None else self.hf_compute_loss(labels, logits)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convbert', None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.hidden_size])

@add_start_docstrings('\n    ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', CONVBERT_START_DOCSTRING)
class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss):

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.convbert = TFConvBertMainLayer(config, name='convbert')
        self.qa_outputs = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, start_positions: tf.Tensor | None=None, end_positions: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple, TFQuestionAnsweringModelOutput]:
        outputs = self.convbert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        (start_logits, end_logits) = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)
        loss = None
        if start_positions is not None and end_positions is not None:
            labels = {'start_position': start_positions}
            labels['end_position'] = end_positions
            loss = self.hf_compute_loss(labels, (start_logits, end_logits))
        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convbert', None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
        if getattr(self, 'qa_outputs', None) is not None:
            with tf.name_scope(self.qa_outputs.name):
                self.qa_outputs.build([None, None, self.config.hidden_size])
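
# Sketch of how the span logits above are produced: `qa_outputs` emits two values per token,
# which are split into start and end logits (hypothetical sizes).
def _qa_span_logits_sketch():
    batch_size, seq_length = 1, 5
    logits = tf.random.normal((batch_size, seq_length, 2))
    start_logits, end_logits = tf.split(logits, 2, axis=-1)
    start_logits = tf.squeeze(start_logits, axis=-1)  # (1, 5)
    end_logits = tf.squeeze(end_logits, axis=-1)  # (1, 5)
    return int(tf.argmax(start_logits, axis=-1)[0]), int(tf.argmax(end_logits, axis=-1)[0])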

# File: transformers-main/src/transformers/models/convbert/tokenization_convbert.py
""""""
import collections
import os
import unicodedata
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

def load_vocab(vocab_file):
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for (index, token) in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab

def whitespace_tokenize(text):
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens

class ConvBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        if not os.path.isfile(vocab_file):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

    @property
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text, split_special_tokens=False):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None):
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
        return [1] + [0] * len(token_ids_0) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)
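
# Sketch of the special-token layout produced above for a sentence pair, assuming the
# "YituTech/conv-bert-base" checkpoint: [CLS] A [SEP] B [SEP] with token type ids 0/1.
def _special_tokens_sketch():
    tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
    ids = tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
    type_ids = tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])
    return ids, type_ids  # type_ids == [0, 0, 0, 0, 1, 1, 1]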

class BasicTokenizer:

    def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        unicode_normalized_text = unicodedata.normalize('NFC', text)
        orig_tokens = whitespace_tokenize(unicode_normalized_text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))
        output_tokens = whitespace_tokenize(' '.join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        text = unicodedata.normalize('NFD', text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == 'Mn':
                continue
            output.append(char)
        return ''.join(output)

    def _run_split_on_punc(self, text, never_split=None):
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return [''.join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(' ')
                output.append(char)
                output.append(' ')
            else:
                output.append(char)
        return ''.join(output)

    def _is_chinese_char(self, cp):
        # Checks whether `cp` is the codepoint of a CJK character (CJK Unified Ideographs and compatibility blocks).
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False

    def _clean_text(self, text):
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(' ')
            else:
                output.append(char)
        return ''.join(output)

class WordpieceTokenizer:

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = ''.join(chars[start:end])
                    if start > 0:
                        substr = '##' + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
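
# A toy run of the greedy longest-match-first algorithm above: with the vocabulary below,
# "unaffable" is split into the longest known prefix and "##"-continued suffixes.
def _wordpiece_example():
    vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
    tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
    return tokenizer.tokenize("unaffable")  # ["un", "##aff", "##able"]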

# File: transformers-main/src/transformers/models/convbert/tokenization_convbert_fast.py
""""""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars:
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

# File: transformers-main/src/transformers/models/convnext/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available
_import_structure = {'configuration_convnext': ['ConvNextConfig', 'ConvNextOnnxConfig']}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_convnext'] = ['ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_convnext'] = ['TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_convnext import ConvNextConfig, ConvNextOnnxConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)

# File: transformers-main/src/transformers/models/convnext/configuration_convnext.py
""""""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

class ConvNextConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'convnext'

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-12, layer_scale_init_value=1e-06, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)

class ConvNextOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-05
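
# Sketch of the configuration defaults above; they correspond to the ConvNeXt-T layout
# used by the "tiny" checkpoints.
def _convnext_config_sketch():
    config = ConvNextConfig()
    # ([3, 3, 9, 3], [96, 192, 384, 768], ['stem', 'stage1', 'stage2', 'stage3', 'stage4'])
    return config.depths, config.hidden_sizes, config.stage_names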

# File: transformers-main/src/transformers/models/convnext/convert_convnext_to_pytorch.py
""""""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, ConvNextForImageClassification, ConvNextImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

def get_convnext_config(checkpoint_url):
    config = ConvNextConfig()
    if 'tiny' in checkpoint_url:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if 'small' in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if 'base' in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
    if 'large' in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
    if 'xlarge' in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
    if '1k' in checkpoint_url:
        num_labels = 1000
        filename = 'imagenet-1k-id2label.json'
        expected_shape = (1, 1000)
    else:
        num_labels = 21841
        filename = 'imagenet-22k-id2label.json'
        expected_shape = (1, 21841)
    repo_id = 'huggingface/label-files'
    config.num_labels = num_labels
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for (k, v) in id2label.items()}
    if '1k' not in checkpoint_url:
        del id2label[9205]
        del id2label[15027]
    config.id2label = id2label
    config.label2id = {v: k for (k, v) in id2label.items()}
    config.hidden_sizes = hidden_sizes
    config.depths = depths
    return (config, expected_shape)

def rename_key(name):
    if 'downsample_layers.0.0' in name:
        name = name.replace('downsample_layers.0.0', 'embeddings.patch_embeddings')
    if 'downsample_layers.0.1' in name:
        name = name.replace('downsample_layers.0.1', 'embeddings.norm')
    if 'downsample_layers.1.0' in name:
        name = name.replace('downsample_layers.1.0', 'stages.1.downsampling_layer.0')
    if 'downsample_layers.1.1' in name:
        name = name.replace('downsample_layers.1.1', 'stages.1.downsampling_layer.1')
    if 'downsample_layers.2.0' in name:
        name = name.replace('downsample_layers.2.0', 'stages.2.downsampling_layer.0')
    if 'downsample_layers.2.1' in name:
        name = name.replace('downsample_layers.2.1', 'stages.2.downsampling_layer.1')
    if 'downsample_layers.3.0' in name:
        name = name.replace('downsample_layers.3.0', 'stages.3.downsampling_layer.0')
    if 'downsample_layers.3.1' in name:
        name = name.replace('downsample_layers.3.1', 'stages.3.downsampling_layer.1')
    if 'stages' in name and 'downsampling_layer' not in name:
        name = name[:len('stages.0')] + '.layers' + name[len('stages.0'):]
    if 'stages' in name:
        name = name.replace('stages', 'encoder.stages')
    if 'norm' in name:
        name = name.replace('norm', 'layernorm')
    if 'gamma' in name:
        name = name.replace('gamma', 'layer_scale_parameter')
    if 'head' in name:
        name = name.replace('head', 'classifier')
    return name
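
# Example of the renaming above (traced by hand): the second downsampling block of the
# original checkpoint maps onto the HF ConvNext encoder layout.
def _rename_key_example():
    return rename_key("downsample_layers.1.0.weight")  # -> "encoder.stages.1.downsampling_layer.0.weight"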

def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im

@torch.no_grad()
def convert_convnext_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    (config, expected_shape) = get_convnext_config(checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)['model']
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if not key.startswith('classifier'):
            key = 'convnext.' + key
        state_dict[key] = val
    model = ConvNextForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    size = 224 if '224' in checkpoint_url else 384
    image_processor = ConvNextImageProcessor(size=size)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values
    logits = model(pixel_values).logits
    if checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth':
        expected_logits = torch.tensor([-0.121, -0.6605, 0.1918])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth':
        expected_logits = torch.tensor([-0.4473, -0.1847, -0.6365])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth':
        expected_logits = torch.tensor([0.4525, 0.7539, 0.0308])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_384.pth':
        expected_logits = torch.tensor([0.3561, 0.635, -0.0384])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth':
        expected_logits = torch.tensor([0.4174, -0.0989, 0.1489])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_384.pth':
        expected_logits = torch.tensor([0.2513, -0.1349, -0.1613])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth':
        expected_logits = torch.tensor([1.298, 0.3631, -0.1198])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth':
        expected_logits = torch.tensor([1.2963, 0.1227, 0.1723])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth':
        expected_logits = torch.tensor([1.7956, 0.839, 0.282])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth':
        expected_logits = torch.tensor([-0.2822, -0.0502, -0.0878])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth':
        expected_logits = torch.tensor([-0.5672, -0.073, -0.4348])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth':
        expected_logits = torch.tensor([0.2681, 0.2365, 0.6246])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth':
        expected_logits = torch.tensor([-0.2642, 0.3931, 0.5116])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth':
        expected_logits = torch.tensor([-0.6677, -0.1873, -0.8379])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth':
        expected_logits = torch.tensor([-0.7749, -0.2967, -0.6444])
    else:
        raise ValueError(f'Unknown URL: {checkpoint_url}')
    assert torch.allclose(logits[0, :3], expected_logits, atol=0.001)
    assert logits.shape == expected_shape
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    print('Pushing model to the hub...')
    model_name = 'convnext'
    if 'tiny' in checkpoint_url:
        model_name += '-tiny'
    elif 'small' in checkpoint_url:
        model_name += '-small'
    elif 'base' in checkpoint_url:
        model_name += '-base'
    elif 'xlarge' in checkpoint_url:
        model_name += '-xlarge'
    elif 'large' in checkpoint_url:
        model_name += '-large'
    if '224' in checkpoint_url:
        model_name += '-224'
    elif '384' in checkpoint_url:
        model_name += '-384'
    if '22k' in checkpoint_url and '1k' not in checkpoint_url:
        model_name += '-22k'
    if '22k' in checkpoint_url and '1k' in checkpoint_url:
        model_name += '-22k-1k'
    model.push_to_hub(repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_url', default='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth', type=str, help="URL of the original ConvNeXT checkpoint you'd like to convert.")
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model directory.')
    args = parser.parse_args()
    convert_convnext_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)

# File: transformers-main/src/transformers/models/convnext/feature_extraction_convnext.py
""""""
import warnings
from ...utils import logging
from .image_processing_convnext import ConvNextImageProcessor
logger = logging.get_logger(__name__)

class ConvNextFeatureExtractor(ConvNextImageProcessor):

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn('The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use ConvNextImageProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs)

# File: transformers-main/src/transformers/models/convnext/image_processing_convnext.py
""""""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, get_resize_output_image_size, resize, to_channel_dimension_format
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
if is_vision_available():
    import PIL
logger = logging.get_logger(__name__)

class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, crop_pct: float=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if 'shortest_edge' not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, input_data_format=input_data_format, **kwargs)
        else:
            return resize(image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)

    @filter_out_non_signature_kwargs()
    def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, crop_pct: float=None, resample: PILImageResampling=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
        images = [to_numpy_array(image) for image in images]
        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(images[0])
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
        images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

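# A minimal usage sketch of the resize/preprocess logic above, assuming this class is
# exposed as `transformers.ConvNextImageProcessor` (the class name is not visible in
# this excerpt) and that a random array is an acceptable stand-in for a real image.
if __name__ == "__main__":
    import numpy as np
    from transformers import ConvNextImageProcessor

    image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    processor = ConvNextImageProcessor(size={"shortest_edge": 224}, crop_pct=224 / 256)
    # shortest_edge < 384, so the shortest side is first resized to 224 / crop_pct = 256
    # and the result is center-cropped to 224 x 224 before rescaling and normalization.
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)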
# File: transformers-main/src/transformers/models/convnext/modeling_convnext.py
""""""
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_convnext import ConvNextConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'ConvNextConfig'
_CHECKPOINT_FOR_DOC = 'facebook/convnext-tiny-224'
_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
_IMAGE_CLASS_CHECKPOINT = 'facebook/convnext-tiny-224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

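# Stochastic depth ("drop path"): with probability `drop_prob`, the residual branch is
# zeroed for an entire sample and the surviving samples are rescaled by 1 / keep_prob so
# the expected value is preserved. It is a no-op at inference time or when drop_prob == 0.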
def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()
    output = input.div(keep_prob) * random_tensor
    return output

class ConvNextDropPath(nn.Module):

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return 'p={}'.format(self.drop_prob)

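# LayerNorm supporting both channels-last (..., channels) and channels-first
# (batch, channels, height, width) inputs; the channels-first path normalizes over the
# channel dimension with an explicit mean/variance computation in float32.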
class ConvNextLayerNorm(nn.Module):

    def __init__(self, normalized_shape, eps=1e-06, data_format='channels_last'):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ['channels_last', 'channels_first']:
            raise NotImplementedError(f'Unsupported data format: {self.data_format}')
        self.normalized_shape = (normalized_shape,)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.data_format == 'channels_last':
            x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        elif self.data_format == 'channels_first':
            input_dtype = x.dtype
            x = x.float()
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = x.to(dtype=input_dtype)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x

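# Patchify stem: a non-overlapping convolution (kernel_size == stride == patch_size)
# followed by a channels-first layer norm.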
class ConvNextEmbeddings(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.patch_embeddings = nn.Conv2d(config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size)
        self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-06, data_format='channels_first')
        self.num_channels = config.num_channels

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError('Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings

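# ConvNeXt block: 7x7 depthwise conv -> LayerNorm (channels-last) -> 1x1 expansion to
# 4 * dim (as nn.Linear) -> activation -> 1x1 projection back to dim, with optional
# layer scale and stochastic depth applied on the residual connection.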
class ConvNextLayer(nn.Module):

    def __init__(self, config, dim, drop_path=0):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
        self.layernorm = ConvNextLayerNorm(dim, eps=1e-06)
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = ACT2FN[config.hidden_act]
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.layer_scale_parameter = nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True) if config.layer_scale_init_value > 0 else None
        self.drop_path = ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = x.permute(0, 2, 3, 1)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.layer_scale_parameter is not None:
            x = self.layer_scale_parameter * x
        x = x.permute(0, 3, 1, 2)
        x = input + self.drop_path(x)
        return x

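# One stage of the network: an optional downsampling step (channels-first LayerNorm +
# strided conv) followed by `depth` ConvNeXt blocks at the same resolution.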
class ConvNextStage(nn.Module):

    def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
        super().__init__()
        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = nn.Sequential(ConvNextLayerNorm(in_channels, eps=1e-06, data_format='channels_first'), nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride))
        else:
            self.downsampling_layer = nn.Identity()
        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = nn.Sequential(*[ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)])

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        hidden_states = self.downsampling_layer(hidden_states)
        hidden_states = self.layers(hidden_states)
        return hidden_states

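# Stacks config.num_stages stages and distributes the stochastic-depth rates linearly
# from 0 to config.drop_path_rate across all blocks.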
class ConvNextEncoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList()
        drop_path_rates = [x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)]
        prev_chs = config.hidden_sizes[0]
        for i in range(config.num_stages):
            out_chs = config.hidden_sizes[i]
            stage = ConvNextStage(config, in_channels=prev_chs, out_channels=out_chs, stride=2 if i > 0 else 1, depth=config.depths[i], drop_path_rates=drop_path_rates[i])
            self.stages.append(stage)
            prev_chs = out_chs

    def forward(self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        all_hidden_states = () if output_hidden_states else None
        for (i, layer_module) in enumerate(self.stages):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            hidden_states = layer_module(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)

class ConvNextPreTrainedModel(PreTrainedModel):
    config_class = ConvNextConfig
    base_model_prefix = 'convnext'
    main_input_name = 'pixel_values'
    _no_split_modules = ['ConvNextLayer']

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
CONVNEXT_START_DOCSTRING = '\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
CONVNEXT_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'

@add_start_docstrings('The bare ConvNext model outputting raw features without any specific head on top.', CONVNEXT_START_DOCSTRING)
class ConvNextModel(ConvNextPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = ConvNextEmbeddings(config)
        self.encoder = ConvNextEncoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: torch.FloatTensor=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        embedding_output = self.embeddings(pixel_values)
        encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)

@add_start_docstrings('\n    ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', CONVNEXT_START_DOCSTRING)
class ConvNextForImageClassification(ConvNextPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.convnext = ConvNextModel(config)
        self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(self, pixel_values: torch.FloatTensor=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.convnext(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)

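# Backbone variant: returns the (channels-first LayerNorm-ed) hidden states of the
# requested stages as feature maps, for use with frameworks such as DETR or MaskFormer.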
@add_start_docstrings('\n    ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer.\n    ', CONVNEXT_START_DOCSTRING)
class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin):

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.embeddings = ConvNextEmbeddings(config)
        self.encoder = ConvNextEncoder(config)
        self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
        hidden_states_norms = {}
        for (stage, num_channels) in zip(self._out_features, self.channels):
            hidden_states_norms[stage] = ConvNextLayerNorm(num_channels, data_format='channels_first')
        self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        embedding_output = self.embeddings(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=return_dict)
        hidden_states = outputs.hidden_states if return_dict else outputs[1]
        feature_maps = ()
        for (stage, hidden_state) in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                hidden_state = self.hidden_states_norms[stage](hidden_state)
                feature_maps += (hidden_state,)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states if output_hidden_states else None, attentions=None)

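# A minimal usage sketch of single-image classification with the classes defined above,
# assuming the public 'facebook/convnext-tiny-224' checkpoint is reachable; random pixel
# values stand in for a real preprocessed image.
if __name__ == "__main__":
    model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
    model.eval()
    pixel_values = torch.rand(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(pixel_values=pixel_values).logits
    # logits has shape (1, num_labels); the argmax indexes the ImageNet-1k label names.
    print(logits.shape, model.config.id2label[int(logits.argmax(-1))])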
# File: transformers-main/src/transformers/models/convnext/modeling_tf_convnext.py
""""""
from __future__ import annotations
from typing import List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput
from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_convnext import ConvNextConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'ConvNextConfig'
_CHECKPOINT_FOR_DOC = 'facebook/convnext-tiny-224'

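# TensorFlow counterpart of stochastic depth; only active when `training` is truthy.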
class TFConvNextDropPath(keras.layers.Layer):

    def __init__(self, drop_path: float, **kwargs):
        super().__init__(**kwargs)
        self.drop_path = drop_path

    def call(self, x: tf.Tensor, training=None):
        if training:
            keep_prob = 1 - self.drop_path
            shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
            random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
            random_tensor = tf.floor(random_tensor)
            return x / keep_prob * random_tensor
        return x

class TFConvNextEmbeddings(keras.layers.Layer):

    def __init__(self, config: ConvNextConfig, **kwargs):
        super().__init__(**kwargs)
        self.patch_embeddings = keras.layers.Conv2D(filters=config.hidden_sizes[0], kernel_size=config.patch_size, strides=config.patch_size, name='patch_embeddings', kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros())
        self.layernorm = keras.layers.LayerNormalization(epsilon=1e-06, name='layernorm')
        self.num_channels = config.num_channels
        self.config = config

    def call(self, pixel_values):
        if isinstance(pixel_values, dict):
            pixel_values = pixel_values['pixel_values']
        tf.debugging.assert_equal(shape_list(pixel_values)[1], self.num_channels, message='Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'patch_embeddings', None) is not None:
            with tf.name_scope(self.patch_embeddings.name):
                self.patch_embeddings.build([None, None, None, self.config.num_channels])
        if getattr(self, 'layernorm', None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])

class TFConvNextLayer(keras.layers.Layer):

    def __init__(self, config, dim, drop_path=0.0, **kwargs):
        super().__init__(**kwargs)
        self.dim = dim
        self.config = config
        self.dwconv = keras.layers.Conv2D(filters=dim, kernel_size=7, padding='same', groups=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='dwconv')
        self.layernorm = keras.layers.LayerNormalization(epsilon=1e-06, name='layernorm')
        self.pwconv1 = keras.layers.Dense(units=4 * dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='pwconv1')
        self.act = get_tf_activation(config.hidden_act)
        self.pwconv2 = keras.layers.Dense(units=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='pwconv2')
        self.drop_path = TFConvNextDropPath(drop_path, name='drop_path') if drop_path > 0.0 else keras.layers.Activation('linear', name='drop_path')

    def build(self, input_shape: tf.TensorShape=None):
        self.layer_scale_parameter = self.add_weight(shape=(self.dim,), initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value), trainable=True, name='layer_scale_parameter') if self.config.layer_scale_init_value > 0 else None
        if self.built:
            return
        self.built = True
        if getattr(self, 'dwconv', None) is not None:
            with tf.name_scope(self.dwconv.name):
                self.dwconv.build([None, None, None, self.dim])
        if getattr(self, 'layernorm', None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.dim])
        if getattr(self, 'pwconv1', None) is not None:
            with tf.name_scope(self.pwconv1.name):
                self.pwconv1.build([None, None, self.dim])
        if getattr(self, 'pwconv2', None) is not None:
            with tf.name_scope(self.pwconv2.name):
                self.pwconv2.build([None, None, 4 * self.dim])
        if getattr(self, 'drop_path', None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)

    def call(self, hidden_states, training=False):
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.layer_scale_parameter is not None:
            x = self.layer_scale_parameter * x
        x = input + self.drop_path(x, training=training)
        return x

class TFConvNextStage(keras.layers.Layer):

    def __init__(self, config: ConvNextConfig, in_channels: int, out_channels: int, kernel_size: int=2, stride: int=2, depth: int=2, drop_path_rates: Optional[List[float]]=None, **kwargs):
        super().__init__(**kwargs)
        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = [keras.layers.LayerNormalization(epsilon=1e-06, name='downsampling_layer.0'), keras.layers.Conv2D(filters=out_channels, kernel_size=kernel_size, strides=stride, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='downsampling_layer.1')]
        else:
            self.downsampling_layer = [tf.identity]
        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = [TFConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j], name=f'layers.{j}') for j in range(depth)]
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride

    def call(self, hidden_states):
        for layer in self.downsampling_layer:
            hidden_states = layer(hidden_states)
        for layer in self.layers:
            hidden_states = layer(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'layers', None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)
        if self.in_channels != self.out_channels or self.stride > 1:
            with tf.name_scope(self.downsampling_layer[0].name):
                self.downsampling_layer[0].build([None, None, None, self.in_channels])
            with tf.name_scope(self.downsampling_layer[1].name):
                self.downsampling_layer[1].build([None, None, None, self.in_channels])

class TFConvNextEncoder(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
        drop_path_rates = tf.split(drop_path_rates, config.depths)
        drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
        prev_chs = config.hidden_sizes[0]
        for i in range(config.num_stages):
            out_chs = config.hidden_sizes[i]
            stage = TFConvNextStage(config, in_channels=prev_chs, out_channels=out_chs, stride=2 if i > 0 else 1, depth=config.depths[i], drop_path_rates=drop_path_rates[i], name=f'stages.{i}')
            self.stages.append(stage)
            prev_chs = out_chs

    def call(self, hidden_states, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        for (i, layer_module) in enumerate(self.stages):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            hidden_states = layer_module(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))
        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)

    def build(self, input_shape=None):
        for stage in self.stages:
            with tf.name_scope(stage.name):
                stage.build(None)

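# Keras main layer: pixel values arrive as NCHW (matching the PyTorch models), are
# transposed to NHWC inside the embeddings, and the final hidden states are transposed
# back to NCHW so the outputs line up with the PyTorch implementation.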
@keras_serializable
class TFConvNextMainLayer(keras.layers.Layer):
    config_class = ConvNextConfig

    def __init__(self, config: ConvNextConfig, add_pooling_layer: bool=True, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embeddings = TFConvNextEmbeddings(config, name='embeddings')
        self.encoder = TFConvNextEncoder(config, name='encoder')
        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm')
        self.pooler = keras.layers.GlobalAvgPool2D(data_format='channels_first') if add_pooling_layer else None

    @unpack_inputs
    def call(self, pixel_values: TFModelInputType | None=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        embedding_output = self.embeddings(pixel_values, training=training)
        encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = self.layernorm(self.pooler(last_hidden_state))
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            hidden_states = hidden_states if output_hidden_states else ()
            return (last_hidden_state, pooled_output) + hidden_states
        return TFBaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'embeddings', None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, 'encoder', None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, 'layernorm', None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, self.config.hidden_sizes[-1]])

class TFConvNextPreTrainedModel(TFPreTrainedModel):
    config_class = ConvNextConfig
    base_model_prefix = 'convnext'
    main_input_name = 'pixel_values'
CONVNEXT_START_DOCSTRING = '\n    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n    behavior.\n\n    \n\n    TensorFlow models and layers in `transformers` accept two formats as input:\n\n    - having all inputs as keyword arguments (like PyTorch models), or\n    - having all inputs as a list, tuple or dict in the first positional argument.\n\n    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n    positional argument:\n\n    - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`\n    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n    `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`\n    - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n    `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`\n\n    Note that when creating models and layers with\n    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n    about any of this, as you can just pass inputs like you would to any other Python function!\n\n    \n\n    Parameters:\n        config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
CONVNEXT_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n            used instead.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n            eager mode, in graph mode the value will always be set to True.\n'

@add_start_docstrings('The bare ConvNext model outputting raw features without any specific head on top.', CONVNEXT_START_DOCSTRING)
class TFConvNextModel(TFConvNextPreTrainedModel):

    def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name='convnext')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def call(self, pixel_values: TFModelInputType | None=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        outputs = self.convnext(pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPooling(last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convnext', None) is not None:
            with tf.name_scope(self.convnext.name):
                self.convnext.build(None)

@add_start_docstrings('\n    ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', CONVNEXT_START_DOCSTRING)
class TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss):

    def __init__(self, config: ConvNextConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.convnext = TFConvNextMainLayer(config, name='convnext')
        self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='classifier')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, pixel_values: TFModelInputType | None=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        outputs = self.convnext(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convnext', None) is not None:
            with tf.name_scope(self.convnext.name):
                self.convnext.build(None)
        if getattr(self, 'classifier', None) is not None:
            if hasattr(self.classifier, 'name'):
                with tf.name_scope(self.classifier.name):
                    self.classifier.build([None, None, self.config.hidden_sizes[-1]])

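# A parallel usage sketch for the TF classes above, assuming TensorFlow weights for
# 'facebook/convnext-tiny-224' can be loaded (if only PyTorch weights are published,
# passing `from_pt=True` to `from_pretrained` would be needed instead).
if __name__ == "__main__":
    model = TFConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
    pixel_values = tf.random.uniform((1, 3, 224, 224))
    logits = model(pixel_values=pixel_values, training=False).logits
    print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])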
# File: transformers-main/src/transformers/models/convnextv2/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_tf_available
_import_structure = {'configuration_convnextv2': ['ConvNextV2Config']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_convnextv2'] = ['ConvNextV2ForImageClassification', 'ConvNextV2Model', 'ConvNextV2PreTrainedModel', 'ConvNextV2Backbone']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_convnextv2'] = ['TFConvNextV2ForImageClassification', 'TFConvNextV2Model', 'TFConvNextV2PreTrainedModel']
if TYPE_CHECKING:
    from .configuration_convnextv2 import ConvNextV2Config
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnextv2 import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model, ConvNextV2PreTrainedModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnextv2 import TFConvNextV2ForImageClassification, TFConvNextV2Model, TFConvNextV2PreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)

# File: transformers-main/src/transformers/models/convnextv2/configuration_convnextv2.py
""""""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = 'convnextv2'

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)

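# A small sketch of instantiating a randomly initialized ConvNeXt V2 model from this
# config, assuming PyTorch and the modeling code are installed; the depths/hidden sizes
# below roughly follow the smallest ('atto'-like) variant and serve only as an example.
if __name__ == "__main__":
    from transformers import ConvNextV2Model

    config = ConvNextV2Config(depths=[2, 2, 6, 2], hidden_sizes=[40, 80, 160, 320])
    model = ConvNextV2Model(config)
    print(f"parameters: {sum(p.numel() for p in model.parameters()):,}")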
# File: transformers-main/src/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py
""""""
import argparse
import json
import os
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextImageProcessor, ConvNextV2Config, ConvNextV2ForImageClassification
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

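# Infers depths/hidden sizes from the checkpoint URL (atto ... huge) and attaches the
# ImageNet-1k id2label mapping to the config.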
def get_convnextv2_config(checkpoint_url):
    config = ConvNextV2Config()
    if 'atto' in checkpoint_url:
        depths = [2, 2, 6, 2]
        hidden_sizes = [40, 80, 160, 320]
    if 'femto' in checkpoint_url:
        depths = [2, 2, 6, 2]
        hidden_sizes = [48, 96, 192, 384]
    if 'pico' in checkpoint_url:
        depths = [2, 2, 6, 2]
        hidden_sizes = [64, 128, 256, 512]
    if 'nano' in checkpoint_url:
        depths = [2, 2, 8, 2]
        hidden_sizes = [80, 160, 320, 640]
    if 'tiny' in checkpoint_url:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if 'base' in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
    if 'large' in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
    if 'huge' in checkpoint_url:
        depths = [3, 3, 27, 3]
        hidden_sizes = [352, 704, 1408, 2816]
    num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)
    repo_id = 'huggingface/label-files'
    config.num_labels = num_labels
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for (k, v) in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for (k, v) in id2label.items()}
    config.hidden_sizes = hidden_sizes
    config.depths = depths
    return (config, expected_shape)

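# Maps parameter names from the original ConvNeXt V2 release to the Hugging Face layout
# (embeddings.*, encoder.stages.*.downsampling_layer / layers.*, classifier.*).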
def rename_key(name):
    if 'downsample_layers.0.0' in name:
        name = name.replace('downsample_layers.0.0', 'embeddings.patch_embeddings')
    if 'downsample_layers.0.1' in name:
        name = name.replace('downsample_layers.0.1', 'embeddings.norm')
    if 'downsample_layers.1.0' in name:
        name = name.replace('downsample_layers.1.0', 'stages.1.downsampling_layer.0')
    if 'downsample_layers.1.1' in name:
        name = name.replace('downsample_layers.1.1', 'stages.1.downsampling_layer.1')
    if 'downsample_layers.2.0' in name:
        name = name.replace('downsample_layers.2.0', 'stages.2.downsampling_layer.0')
    if 'downsample_layers.2.1' in name:
        name = name.replace('downsample_layers.2.1', 'stages.2.downsampling_layer.1')
    if 'downsample_layers.3.0' in name:
        name = name.replace('downsample_layers.3.0', 'stages.3.downsampling_layer.0')
    if 'downsample_layers.3.1' in name:
        name = name.replace('downsample_layers.3.1', 'stages.3.downsampling_layer.1')
    if 'stages' in name and 'downsampling_layer' not in name:
        name = name[:len('stages.0')] + '.layers' + name[len('stages.0'):]
    if 'gamma' in name:
        name = name.replace('gamma', 'weight')
    if 'beta' in name:
        name = name.replace('beta', 'bias')
    if 'stages' in name:
        name = name.replace('stages', 'encoder.stages')
    if 'norm' in name:
        name = name.replace('norm', 'layernorm')
    if 'head' in name:
        name = name.replace('head', 'classifier')
    return name

def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im

def convert_preprocessor(checkpoint_url):
    if '224' in checkpoint_url:
        size = 224
        crop_pct = 224 / 256
    elif '384' in checkpoint_url:
        size = 384
        crop_pct = None
    else:
        size = 512
        crop_pct = None
    return ConvNextImageProcessor(size=size, crop_pct=crop_pct, image_mean=[0.485, 0.456, 0.406], image_std=[0.229, 0.224, 0.225], resample=PILImageResampling.BICUBIC)

@torch.no_grad()
def convert_convnextv2_checkpoint(checkpoint_url, pytorch_dump_folder_path, save_model, push_to_hub):
    print('Downloading original model from checkpoint...')
    (config, expected_shape) = get_convnextv2_config(checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)['model']
    print('Converting model parameters...')
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if not key.startswith('classifier'):
            key = 'convnextv2.' + key
        state_dict[key] = val
    model = ConvNextV2ForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    preprocessor = convert_preprocessor(checkpoint_url)
    inputs = preprocessor(images=prepare_img(), return_tensors='pt')
    logits = model(**inputs).logits
    if checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt':
        expected_logits = torch.tensor([-0.393, 0.1747, -0.5246, 0.4177, 0.4295])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt':
        expected_logits = torch.tensor([-0.1727, -0.5341, -0.7818, -0.4745, -0.6566])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt':
        expected_logits = torch.tensor([-0.0333, 0.1563, -0.9137, 0.1054, 0.0381])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt':
        expected_logits = torch.tensor([-0.1744, -0.1555, -0.0713, 0.095, -0.1431])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt':
        expected_logits = torch.tensor([0.9996, 0.1966, -0.4386, -0.3472, 0.6661])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt':
        expected_logits = torch.tensor([-0.2553, -0.6708, -0.1359, 0.2518, -0.2488])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt':
        expected_logits = torch.tensor([-0.0673, -0.5627, -0.3753, -0.2722, 0.0178])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt':
        expected_logits = torch.tensor([-0.6377, -0.7458, -0.215, 0.1184, -0.0597])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt':
        expected_logits = torch.tensor([1.0799, 0.2322, -0.886, 1.0219, 0.6231])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt':
        expected_logits = torch.tensor([0.3766, 0.4917, -1.1426, 0.9942, 0.6024])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt':
        expected_logits = torch.tensor([0.422, -0.6919, -0.4317, -0.2881, -0.6609])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt':
        expected_logits = torch.tensor([0.1082, -0.8286, -0.5095, 0.4681, -0.8085])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt':
        expected_logits = torch.tensor([-0.2419, -0.6221, 0.2176, -0.098, -0.7527])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt':
        expected_logits = torch.tensor([0.0391, -0.4371, 0.3786, 0.1251, -0.2784])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt':
        expected_logits = torch.tensor([-0.0504, 0.5636, -0.1729, -0.6507, -0.3949])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt':
        expected_logits = torch.tensor([0.356, 0.9486, 0.3149, -0.2667, -0.5138])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt':
        expected_logits = torch.tensor([-0.2469, -0.455, -0.5853, -0.081, 0.0309])
    elif checkpoint_url == 'https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt':
        expected_logits = torch.tensor([-0.309, 0.0802, -0.0682, -0.1979, -0.2826])
    else:
        raise ValueError(f'Unknown URL: {checkpoint_url}')
    assert torch.allclose(logits[0, :5], expected_logits, atol=0.001)
    assert logits.shape == expected_shape
    print('Model outputs match the original results!')
    if save_model:
        print('Saving model to local...')
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    model_name = 'convnextv2'
    if 'atto' in checkpoint_url:
        model_name += '-atto'
    if 'femto' in checkpoint_url:
        model_name += '-femto'
    if 'pico' in checkpoint_url:
        model_name += '-pico'
    if 'nano' in checkpoint_url:
        model_name += '-nano'
    elif 'tiny' in checkpoint_url:
        model_name += '-tiny'
    elif 'base' in checkpoint_url:
        model_name += '-base'
    elif 'large' in checkpoint_url:
        model_name += '-large'
    elif 'huge' in checkpoint_url:
        model_name += '-huge'
    if '22k' in checkpoint_url and '1k' not in checkpoint_url:
        model_name += '-22k'
    elif '22k' in checkpoint_url and '1k' in checkpoint_url:
        model_name += '-22k-1k'
    elif '1k' in checkpoint_url:
        model_name += '-1k'
    if '224' in checkpoint_url:
        model_name += '-224'
    elif '384' in checkpoint_url:
        model_name += '-384'
    elif '512' in checkpoint_url:
        model_name += '-512'
    if push_to_hub:
        print(f'Pushing {model_name} to the hub...')
        model.push_to_hub(model_name)
        preprocessor.push_to_hub(model_name)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_url', default='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt', type=str, help="URL of the original ConvNeXTV2 checkpoint you'd like to convert.")
    parser.add_argument('--pytorch_dump_folder_path', default='model', type=str, help='Path to the output PyTorch model directory.')
    parser.add_argument('--save_model', action='store_true', help='Save model to local')
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image preprocessor to the hub')
    args = parser.parse_args()
    convert_convnextv2_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)

# File: transformers-main/src/transformers/models/convnextv2/modeling_convnextv2.py
""""""
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_convnextv2 import ConvNextV2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'ConvNextV2Config'
_CHECKPOINT_FOR_DOC = 'facebook/convnextv2-tiny-1k-224'
_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
_IMAGE_CLASS_CHECKPOINT = 'facebook/convnextv2-tiny-1k-224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()
    output = input.div(keep_prob) * random_tensor
    return output

class ConvNextV2DropPath(nn.Module):

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return 'p={}'.format(self.drop_prob)

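# Global Response Normalization (the ConvNeXt V2 addition): the per-channel L2 norm over
# the spatial dimensions is divided by its mean across channels and used to rescale the
# features, with a learnable gain/bias and a residual connection.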
class ConvNextV2GRN(nn.Module):

    def __init__(self, dim: int):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1, 1, 1, dim))
        self.bias = nn.Parameter(torch.zeros(1, 1, 1, dim))

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        global_features = torch.norm(hidden_states, p=2, dim=(1, 2), keepdim=True)
        norm_features = global_features / (global_features.mean(dim=-1, keepdim=True) + 1e-06)
        hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
        return hidden_states

class ConvNextV2LayerNorm(nn.Module):

    def __init__(self, normalized_shape, eps=1e-06, data_format='channels_last'):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ['channels_last', 'channels_first']:
            raise NotImplementedError(f'Unsupported data format: {self.data_format}')
        self.normalized_shape = (normalized_shape,)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.data_format == 'channels_last':
            x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        elif self.data_format == 'channels_first':
            input_dtype = x.dtype
            x = x.float()
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = x.to(dtype=input_dtype)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x

class ConvNextV2Embeddings(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.patch_embeddings = nn.Conv2d(config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size)
        self.layernorm = ConvNextV2LayerNorm(config.hidden_sizes[0], eps=1e-06, data_format='channels_first')
        self.num_channels = config.num_channels

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError('Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings

class ConvNextV2Layer(nn.Module):

    def __init__(self, config, dim, drop_path=0):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
        self.layernorm = ConvNextV2LayerNorm(dim, eps=1e-06)
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = ACT2FN[config.hidden_act]
        self.grn = ConvNextV2GRN(4 * dim)
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.drop_path = ConvNextV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = x.permute(0, 2, 3, 1)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.grn(x)
        x = self.pwconv2(x)
        x = x.permute(0, 3, 1, 2)
        x = input + self.drop_path(x)
        return x

class ConvNextV2Stage(nn.Module):

    def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
        super().__init__()
        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = nn.Sequential(ConvNextV2LayerNorm(in_channels, eps=1e-06, data_format='channels_first'), nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride))
        else:
            self.downsampling_layer = nn.Identity()
        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = nn.Sequential(*[ConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)])

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        hidden_states = self.downsampling_layer(hidden_states)
        hidden_states = self.layers(hidden_states)
        return hidden_states

class ConvNextV2Encoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList()
        drop_path_rates = [x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)]
        prev_chs = config.hidden_sizes[0]
        for i in range(config.num_stages):
            out_chs = config.hidden_sizes[i]
            stage = ConvNextV2Stage(config, in_channels=prev_chs, out_channels=out_chs, stride=2 if i > 0 else 1, depth=config.depths[i], drop_path_rates=drop_path_rates[i])
            self.stages.append(stage)
            prev_chs = out_chs

    def forward(self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        all_hidden_states = () if output_hidden_states else None
        for (i, layer_module) in enumerate(self.stages):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            hidden_states = layer_module(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)

class ConvNextV2PreTrainedModel(PreTrainedModel):
    config_class = ConvNextV2Config
    base_model_prefix = 'convnextv2'
    main_input_name = 'pixel_values'
    _no_split_modules = ['ConvNextV2Layer']

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
CONVNEXTV2_START_DOCSTRING = '\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
CONVNEXTV2_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`ConvNextImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'

@add_start_docstrings('The bare ConvNextV2 model outputting raw features without any specific head on top.', CONVNEXTV2_START_DOCSTRING)
class ConvNextV2Model(ConvNextV2PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = ConvNextV2Embeddings(config)
        self.encoder = ConvNextV2Encoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: torch.FloatTensor=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        embedding_output = self.embeddings(pixel_values)
        encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)

@add_start_docstrings('\n    ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', CONVNEXTV2_START_DOCSTRING)
class ConvNextV2ForImageClassification(ConvNextV2PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.convnextv2 = ConvNextV2Model(config)
        self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(self, pixel_values: torch.FloatTensor=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.convnextv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
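
# A minimal inference sketch for the classification head above. The checkpoint name matches the
# doc-sample checkpoint used elsewhere for ConvNeXT V2; the random stand-in image and the use of
# AutoImageProcessor are assumptions of this sketch:
def _convnextv2_classification_demo() -> None:
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224')
    model = ConvNextV2ForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224')
    image = torch.randint(0, 256, (224, 224, 3), dtype=torch.uint8).numpy()
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])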

@add_start_docstrings('\n    ConvNeXT V2 backbone, to be used with frameworks like DETR and MaskFormer.\n    ', CONVNEXTV2_START_DOCSTRING)
class ConvNextV2Backbone(ConvNextV2PreTrainedModel, BackboneMixin):

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.embeddings = ConvNextV2Embeddings(config)
        self.encoder = ConvNextV2Encoder(config)
        self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
        hidden_states_norms = {}
        for (stage, num_channels) in zip(self._out_features, self.channels):
            hidden_states_norms[stage] = ConvNextV2LayerNorm(num_channels, data_format='channels_first')
        self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        embedding_output = self.embeddings(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=return_dict)
        hidden_states = outputs.hidden_states if return_dict else outputs[1]
        feature_maps = ()
        for (stage, hidden_state) in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                hidden_state = self.hidden_states_norms[stage](hidden_state)
                feature_maps += (hidden_state,)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states if output_hidden_states else None, attentions=None)
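
# A minimal sketch of the backbone above with explicit output stages. The stage names follow the
# 'stage{i}' convention of the ConvNext-style configs, and reusing the classification checkpoint
# for backbone weights is an assumption of this sketch (the head weights are simply discarded):
def _convnextv2_backbone_demo() -> None:
    model = ConvNextV2Backbone.from_pretrained('facebook/convnextv2-tiny-1k-224', out_features=['stage2', 'stage4'])
    with torch.no_grad():
        outputs = model(torch.randn(1, 3, 224, 224))
    for feature_map in outputs.feature_maps:
        print(feature_map.shape)  # one NCHW feature map per requested stage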

# File: transformers-main/src/transformers/models/convnextv2/modeling_tf_convnextv2.py
""""""
from __future__ import annotations
from typing import List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPooling, TFBaseModelOutputWithPoolingAndNoAttention, TFImageClassifierOutputWithNoAttention
from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_convnextv2 import ConvNextV2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'ConvNextV2Config'
_CHECKPOINT_FOR_DOC = 'facebook/convnextv2-tiny-1k-224'
_EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
_IMAGE_CLASS_CHECKPOINT = 'facebook/convnextv2-tiny-1k-224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

class TFConvNextV2DropPath(keras.layers.Layer):

    def __init__(self, drop_path: float, **kwargs):
        super().__init__(**kwargs)
        self.drop_path = drop_path

    def call(self, x: tf.Tensor, training=None):
        if training:
            keep_prob = 1 - self.drop_path
            shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
            random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
            random_tensor = tf.floor(random_tensor)
            return x / keep_prob * random_tensor
        return x

class TFConvNextV2GRN(keras.layers.Layer):

    def __init__(self, config: ConvNextV2Config, dim: int, **kwargs):
        super().__init__(**kwargs)
        self.dim = dim

    def build(self, input_shape: tf.TensorShape=None):
        self.weight = self.add_weight(name='weight', shape=(1, 1, 1, self.dim), initializer=keras.initializers.Zeros())
        self.bias = self.add_weight(name='bias', shape=(1, 1, 1, self.dim), initializer=keras.initializers.Zeros())
        return super().build(input_shape)

    def call(self, hidden_states: tf.Tensor):
        global_features = tf.norm(hidden_states, ord='euclidean', axis=(1, 2), keepdims=True)
        norm_features = global_features / (tf.reduce_mean(global_features, axis=-1, keepdims=True) + 1e-06)
        hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
        return hidden_states
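
# Global Response Normalization above computes, per channel c:
#   G(x)_c = ||x[:, :, :, c]||_2 over the spatial dims,  N(x)_c = G(x)_c / (mean_c G(x) + 1e-6),
#   out = weight * (x * N(x)) + bias + x
# Because `weight` and `bias` are zero-initialized, a freshly built layer acts as the identity.
# A minimal check sketch (tensor sizes are assumptions of this sketch):
def _tf_grn_identity_check() -> None:
    layer = TFConvNextV2GRN(ConvNextV2Config(), dim=8)
    x = tf.random.normal((2, 7, 7, 8))  # NHWC
    tf.debugging.assert_near(layer(x), x)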

class TFConvNextV2Embeddings(keras.layers.Layer):

    def __init__(self, config: ConvNextV2Config, **kwargs):
        super().__init__(**kwargs)
        self.patch_embeddings = keras.layers.Conv2D(filters=config.hidden_sizes[0], kernel_size=config.patch_size, strides=config.patch_size, name='patch_embeddings', kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros())
        self.layernorm = keras.layers.LayerNormalization(epsilon=1e-06, name='layernorm')
        self.num_channels = config.num_channels
        self.config = config

    def call(self, pixel_values):
        if isinstance(pixel_values, dict):
            pixel_values = pixel_values['pixel_values']
        tf.debugging.assert_equal(shape_list(pixel_values)[1], self.num_channels, message='Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'patch_embeddings', None) is not None:
            with tf.name_scope(self.patch_embeddings.name):
                self.patch_embeddings.build([None, None, None, self.config.num_channels])
        if getattr(self, 'layernorm', None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])

class TFConvNextV2Layer(keras.layers.Layer):

    def __init__(self, config: ConvNextV2Config, dim: int, drop_path: float=0.0, **kwargs):
        super().__init__(**kwargs)
        self.dim = dim
        self.config = config
        self.dwconv = keras.layers.Conv2D(filters=dim, kernel_size=7, padding='same', groups=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='dwconv')
        self.layernorm = keras.layers.LayerNormalization(epsilon=1e-06, name='layernorm')
        self.pwconv1 = keras.layers.Dense(units=4 * dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='pwconv1')
        self.act = get_tf_activation(config.hidden_act)
        self.grn = TFConvNextV2GRN(config, 4 * dim, dtype=tf.float32, name='grn')
        self.pwconv2 = keras.layers.Dense(units=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='pwconv2')
        self.drop_path = TFConvNextV2DropPath(drop_path, name='drop_path') if drop_path > 0.0 else keras.layers.Activation('linear', name='drop_path')

    def call(self, hidden_states, training=False):
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.grn(x)
        x = self.pwconv2(x)
        x = self.drop_path(x, training=training)
        x = input + x
        return x

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dwconv', None) is not None:
            with tf.name_scope(self.dwconv.name):
                self.dwconv.build([None, None, None, self.dim])
        if getattr(self, 'layernorm', None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.dim])
        if getattr(self, 'pwconv1', None) is not None:
            with tf.name_scope(self.pwconv1.name):
                self.pwconv1.build([None, None, self.dim])
        if getattr(self, 'grn', None) is not None:
            with tf.name_scope(self.grn.name):
                self.grn.build(None)
        if getattr(self, 'pwconv2', None) is not None:
            with tf.name_scope(self.pwconv2.name):
                self.pwconv2.build([None, None, 4 * self.dim])
        if getattr(self, 'drop_path', None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)

class TFConvNextV2Stage(keras.layers.Layer):

    def __init__(self, config: ConvNextV2Config, in_channels: int, out_channels: int, kernel_size: int=2, stride: int=2, depth: int=2, drop_path_rates: Optional[List[float]]=None, **kwargs):
        super().__init__(**kwargs)
        if in_channels != out_channels or stride > 1:
            self.downsampling_layer = [keras.layers.LayerNormalization(epsilon=1e-06, name='downsampling_layer.0'), keras.layers.Conv2D(filters=out_channels, kernel_size=kernel_size, strides=stride, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='downsampling_layer.1')]
        else:
            self.downsampling_layer = [tf.identity]
        drop_path_rates = drop_path_rates or [0.0] * depth
        self.layers = [TFConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j], name=f'layers.{j}') for j in range(depth)]
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride

    def call(self, hidden_states):
        for layer in self.downsampling_layer:
            hidden_states = layer(hidden_states)
        for layer in self.layers:
            hidden_states = layer(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'layers', None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)
        if self.in_channels != self.out_channels or self.stride > 1:
            with tf.name_scope(self.downsampling_layer[0].name):
                self.downsampling_layer[0].build([None, None, None, self.in_channels])
            with tf.name_scope(self.downsampling_layer[1].name):
                self.downsampling_layer[1].build([None, None, None, self.in_channels])

class TFConvNextV2Encoder(keras.layers.Layer):

    def __init__(self, config: ConvNextV2Config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
        drop_path_rates = tf.split(drop_path_rates, config.depths)
        drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
        prev_chs = config.hidden_sizes[0]
        for i in range(config.num_stages):
            out_chs = config.hidden_sizes[i]
            stage = TFConvNextV2Stage(config, in_channels=prev_chs, out_channels=out_chs, stride=2 if i > 0 else 1, depth=config.depths[i], drop_path_rates=drop_path_rates[i], name=f'stages.{i}')
            self.stages.append(stage)
            prev_chs = out_chs

    def call(self, hidden_states: tf.Tensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple, TFBaseModelOutputWithNoAttention]:
        all_hidden_states = () if output_hidden_states else None
        for (i, layer_module) in enumerate(self.stages):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            hidden_states = layer_module(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)

    def build(self, input_shape=None):
        for stage in self.stages:
            with tf.name_scope(stage.name):
                stage.build(None)

@keras_serializable
class TFConvNextV2MainLayer(keras.layers.Layer):
    config_class = ConvNextV2Config

    def __init__(self, config: ConvNextV2Config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embeddings = TFConvNextV2Embeddings(config, name='embeddings')
        self.encoder = TFConvNextV2Encoder(config, name='encoder')
        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm')
        self.pooler = keras.layers.GlobalAvgPool2D(data_format='channels_last')

    @unpack_inputs
    def call(self, pixel_values: TFModelInputType | None=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        embedding_output = self.embeddings(pixel_values, training=training)
        encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = self.layernorm(pooled_output)
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            hidden_states = hidden_states if output_hidden_states else ()
            return (last_hidden_state, pooled_output) + hidden_states
        return TFBaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'embeddings', None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, 'encoder', None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, 'layernorm', None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, self.config.hidden_sizes[-1]])

class TFConvNextV2PreTrainedModel(TFPreTrainedModel):
    config_class = ConvNextV2Config
    base_model_prefix = 'convnextv2'
    main_input_name = 'pixel_values'
CONVNEXTV2_START_DOCSTRING = '\n    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n    behavior.\n\n    \n\n    TensorFlow models and layers in `transformers` accept two formats as input:\n\n    - having all inputs as keyword arguments (like PyTorch models), or\n    - having all inputs as a list, tuple or dict in the first positional argument.\n\n    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n    positional argument:\n\n    - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`\n    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n    `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`\n    - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n    `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`\n\n    Note that when creating models and layers with\n    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n    about any of this, as you can just pass inputs like you would to any other Python function!\n\n    \n\n    Parameters:\n        config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
CONVNEXTV2_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n            used instead.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n            eager mode, in graph mode the value will always be set to `True`.\n'

@add_start_docstrings('The bare ConvNextV2 model outputting raw features without any specific head on top.', CONVNEXTV2_START_DOCSTRING)
class TFConvNextV2Model(TFConvNextV2PreTrainedModel):

    def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.convnextv2 = TFConvNextV2MainLayer(config, name='convnextv2')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def call(self, pixel_values: TFModelInputType | None=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        outputs = self.convnextv2(pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        if not return_dict:
            return outputs[:]
        return TFBaseModelOutputWithPoolingAndNoAttention(last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convnextv2', None) is not None:
            with tf.name_scope(self.convnextv2.name):
                self.convnextv2.build(None)

@add_start_docstrings('\n    ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', CONVNEXTV2_START_DOCSTRING)
class TFConvNextV2ForImageClassification(TFConvNextV2PreTrainedModel, TFSequenceClassificationLoss):

    def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.convnextv2 = TFConvNextV2MainLayer(config, name='convnextv2')
        self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), bias_initializer=keras.initializers.Zeros(), name='classifier')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def call(self, pixel_values: TFModelInputType | None=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        outputs = self.convnextv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convnextv2', None) is not None:
            with tf.name_scope(self.convnextv2.name):
                self.convnextv2.build(None)
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.hidden_sizes[-1]])
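
# A minimal TF inference sketch mirroring the PyTorch model above. The checkpoint name is the one
# used by this file's doc samples; the random stand-in image and the use of AutoImageProcessor are
# assumptions of this sketch:
def _tf_convnextv2_classification_demo() -> None:
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224')
    model = TFConvNextV2ForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224')
    image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
    inputs = processor(images=image, return_tensors='tf')
    logits = model(**inputs).logits
    print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])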

# File: transformers-main/src/transformers/models/cpm/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_cpm'] = ['CpmTokenizer']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_cpm_fast'] = ['CpmTokenizerFast']
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_cpm import CpmTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_cpm_fast import CpmTokenizerFast
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/cpm/tokenization_cpm.py
""""""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

class CpmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__('You need to install jieba to use CpmTokenizer or CpmTokenizerFast. See https://pypi.org/project/jieba/ for installation.')
        self.jieba = jieba
        self.translator = str.maketrans(' \n', '▂▃')
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self._pad_token_type_id = 3

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1, 1]
        return [0] * len(token_ids_0) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(' ', '').replace('▂', ' ').replace('▃', '\n')
        return text
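
# A minimal usage sketch for the tokenizer above. The `TsinghuaAI/CPM-Generate` checkpoint name and
# the short Chinese sample sentence ("the weather is really nice today") are assumptions of this
# sketch; jieba must be installed, and spaces/newlines round-trip through the '▂'/'▃' placeholders
# handled in `_decode`:
def _cpm_tokenizer_demo() -> None:
    tokenizer = CpmTokenizer.from_pretrained('TsinghuaAI/CPM-Generate')
    ids = tokenizer.encode('今天天气真好')
    print(tokenizer.decode(ids))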

# File: transformers-main/src/transformers/models/cpm/tokenization_cpm_fast.py
""""""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import AddedToken, PreTrainedTokenizerFast
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

class CpmTokenizerFast(PreTrainedTokenizerFast):

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], **kwargs):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__('You need to install jieba to use CpmTokenizer or CpmTokenizerFast. See https://pypi.org/project/jieba/ for installation.')
        self.jieba = jieba
        self.translator = str.maketrans(' \n', '▂▃')

    @property
    def can_save_slow_tokenizer(self) -> bool:
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)

    def _batch_encode_plus(self, batch_text_or_text_pairs, *args, **kwargs):
        batch_text_or_text_pairs = [' '.join([x.translate(self.translator) for x in self.jieba.cut(text, cut_all=False)]) for text in batch_text_or_text_pairs]
        return super()._batch_encode_plus(batch_text_or_text_pairs, *args, **kwargs)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(' ', '').replace('▂', ' ').replace('▃', '\n')
        return text

# File: transformers-main/src/transformers/models/cpmant/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_cpmant': ['CpmAntConfig'], 'tokenization_cpmant': ['CpmAntTokenizer']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_cpmant'] = ['CpmAntForCausalLM', 'CpmAntModel', 'CpmAntPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_cpmant import CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/cpmant/configuration_cpmant.py
""""""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class CpmAntConfig(PretrainedConfig):
    model_type = 'cpmant'

    def __init__(self, vocab_size: int=30720, hidden_size: int=4096, num_attention_heads: int=32, dim_head: int=128, dim_ff: int=10240, num_hidden_layers: int=48, dropout_p: float=0.0, position_bias_num_buckets: int=512, position_bias_max_distance: int=2048, eps: float=1e-06, init_std: float=1.0, prompt_types: int=32, prompt_length: int=32, segment_types: int=32, use_cache: bool=True, **kwargs):
        super().__init__(**kwargs)
        self.prompt_types = prompt_types
        self.prompt_length = prompt_length
        self.segment_types = segment_types
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.dim_head = dim_head
        self.dim_ff = dim_ff
        self.num_hidden_layers = num_hidden_layers
        self.position_bias_num_buckets = position_bias_num_buckets
        self.position_bias_max_distance = position_bias_max_distance
        self.dropout_p = dropout_p
        self.eps = eps
        self.use_cache = use_cache
        self.vocab_size = vocab_size
        self.init_std = init_std

# File: transformers-main/src/transformers/models/cpmant/modeling_cpmant.py
""""""
import math
from typing import List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_cpmant import CpmAntConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = 'openbmb/cpm-ant-10b'
_CONFIG_FOR_DOC = 'CpmAntConfig'

class CpmAntLayerNorm(nn.Module):

    def __init__(self, config: CpmAntConfig):
        super().__init__()
        self.eps = config.eps
        self.dim_norm = config.hidden_size
        self.weight = nn.Parameter(torch.empty(config.hidden_size))

    def forward(self, hidden_states: torch.Tensor):
        if hidden_states.size(-1) != self.dim_norm:
            raise AssertionError('hidden_states.size(-1) != self.dim_norm')
        old_dtype = hidden_states.dtype
        variance = hidden_states.to(torch.float32).pow(2).mean(dim=-1, keepdim=True)
        hidden_states = (hidden_states * torch.rsqrt(variance + self.eps)).to(old_dtype) * self.weight
        return hidden_states
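
# CpmAntLayerNorm above is an RMSNorm-style norm: no mean subtraction and no bias, with the second
# moment computed in float32. A minimal check sketch (the small hidden size is an assumption of this
# sketch, and the weight is filled explicitly because it is created with torch.empty):
def _cpmant_layernorm_check() -> None:
    config = CpmAntConfig(hidden_size=16)
    norm = CpmAntLayerNorm(config)
    norm.weight.data.fill_(1.0)
    x = torch.randn(2, 3, 16)
    ref = x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + config.eps)
    assert torch.allclose(norm(x), ref, atol=1e-05)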

class CpmAntAttention(nn.Module):

    def __init__(self, config: CpmAntConfig):
        super().__init__()
        self.dim_model = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.dim_head = config.dim_head
        self.project_q = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)
        self.project_k = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)
        self.project_v = nn.Linear(self.dim_model, self.num_heads * self.dim_head, bias=False)
        self.attention_out = nn.Linear(self.num_heads * self.dim_head, self.dim_model, bias=False)
        self.softmax = torch.nn.Softmax(dim=-1)
        if config.dropout_p is not None:
            self.dropout = torch.nn.Dropout(p=config.dropout_p)
        else:
            self.dropout = None

    def forward(self, hidden_q: torch.Tensor, hidden_kv: torch.Tensor, attention_mask: torch.BoolTensor, position_bias: torch.Tensor, output_attentions: Optional[bool]=False, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, use_cache: Optional[bool]=None):
        batch_size = hidden_q.size(0)
        len_q = hidden_q.size(1)
        len_k = hidden_kv.size(1)
        query = self.project_q(hidden_q)
        key = self.project_k(hidden_kv)
        value = self.project_v(hidden_kv)
        query = query.view(batch_size, len_q, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
        key = key.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
        value = value.view(batch_size, len_k, self.num_heads, self.dim_head).permute(0, 2, 1, 3)
        if past_key_values is not None:
            key = torch.cat([past_key_values[0], key], dim=-2)
            value = torch.cat([past_key_values[1], value], dim=-2)
            len_k = key.size(-2)
        score = torch.matmul(query, key.transpose(-1, -2)) / math.sqrt(self.dim_head)
        score = score + position_bias
        score = torch.masked_fill(score, attention_mask.view(batch_size, 1, len_q, len_k) == torch.tensor(False), torch.scalar_tensor(float('-inf'), device=score.device, dtype=score.dtype))
        score = self.softmax(score)
        score = torch.masked_fill(score, attention_mask.view(batch_size, 1, len_q, len_k) == torch.tensor(False), torch.scalar_tensor(0, device=score.device, dtype=score.dtype))
        if output_attentions:
            attn_weights = score
        else:
            attn_weights = None
        if self.dropout is not None:
            score = self.dropout(score)
        score = torch.matmul(score, value)
        score = score.view(batch_size, self.num_heads, len_q, self.dim_head).permute(0, 2, 1, 3)
        score = score.contiguous().view(batch_size, len_q, self.num_heads * self.dim_head)
        score = self.attention_out(score)
        past_key_values = None
        if use_cache:
            past_key_values = (key, value)
        return (score, attn_weights, past_key_values)
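
# The attention above returns its (key, value) tensors when `use_cache` is set, each shaped
# (batch, num_heads, seq_len, dim_head), and concatenates an incoming cache along the sequence
# axis on the next call. A minimal shape sketch (all sizes, the all-true masks and the zero
# position bias are assumptions of this sketch):
def _cpmant_kv_cache_demo() -> None:
    config = CpmAntConfig(hidden_size=8, num_attention_heads=2, dim_head=4)
    attn = CpmAntAttention(config)
    hidden = torch.randn(1, 3, 8)
    mask = torch.ones(1, 3, 3, dtype=torch.bool)
    bias = torch.zeros(1, 2, 3, 3)
    (_, _, cache) = attn(hidden, hidden, mask, bias, use_cache=True)
    print(cache[0].shape)  # torch.Size([1, 2, 3, 4])
    # One incremental step: a single new query token attends over 3 cached keys plus 1 new key.
    new_token = torch.randn(1, 1, 8)
    step_mask = torch.ones(1, 1, 4, dtype=torch.bool)
    step_bias = torch.zeros(1, 2, 1, 4)
    (out, _, cache) = attn(new_token, new_token, step_mask, step_bias, past_key_values=cache, use_cache=True)
    print(out.shape, cache[0].shape)  # torch.Size([1, 1, 8]) torch.Size([1, 2, 4, 4])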

class CpmAntSelfAttentionBlock(nn.Module):

    def __init__(self, config: CpmAntConfig):
        super().__init__()
        self.layernorm_before_attention = CpmAntLayerNorm(config)
        self.self_attention = CpmAntAttention(config)
        if config.dropout_p:
            self.dropout = torch.nn.Dropout(config.dropout_p)
        else:
            self.dropout = None

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, use_cache: Optional[bool]=None):
        outputs = self.layernorm_before_attention(hidden_states)
        outputs = self.self_attention(outputs, outputs, attention_mask, position_bias, output_attentions, past_key_values, use_cache)
        (outputs, attn_weights, current_key_value) = outputs
        if self.dropout is not None:
            outputs = self.dropout(outputs)
        hidden_states = hidden_states + outputs
        return (hidden_states, attn_weights, current_key_value)

class CpmAntDenseGatedACT(nn.Module):

    def __init__(self, config: CpmAntConfig):
        super().__init__()
        self.w_0 = nn.Linear(config.hidden_size, config.dim_ff, bias=False)
        self.w_1 = nn.Linear(config.hidden_size, config.dim_ff, bias=False)
        self.act = torch.nn.GELU()

    def forward(self, hidden_states: torch.Tensor):
        gate_score = self.act(self.w_0(hidden_states))
        hidden_states = self.w_1(hidden_states)
        hidden_states = gate_score * hidden_states
        return hidden_states

class CpmAntFeedForward(nn.Module):

    def __init__(self, config: CpmAntConfig):
        super().__init__()
        self.w_in = CpmAntDenseGatedACT(config)
        if config.dropout_p is not None:
            self.dropout = torch.nn.Dropout(config.dropout_p)
        else:
            self.dropout = None
        self.w_out = nn.Linear(config.dim_ff, config.hidden_size, bias=False)

    def forward(self, hidden_states: torch.Tensor):
        hidden_states = self.w_in(hidden_states)
        if self.dropout is not None:
            hidden_states = self.dropout(hidden_states)
        hidden_states = self.w_out(hidden_states)
        return hidden_states

class CpmAntFFNBlock(nn.Module):

    def __init__(self, config: CpmAntConfig):
        super().__init__()
        self.layernorm_before_ffn = CpmAntLayerNorm(config)
        self.ffn = CpmAntFeedForward(config)
        if config.dropout_p:
            self.dropout = torch.nn.Dropout(config.dropout_p)
        else:
            self.dropout = None

    def forward(self, hidden_states: torch.Tensor):
        ln_outputs = self.layernorm_before_ffn(hidden_states)
        outputs = self.ffn(ln_outputs)
        if self.dropout is not None:
            outputs = self.dropout(outputs)
        hidden_states = hidden_states + outputs
        return hidden_states

class CpmAntTransformerBlock(nn.Module):

    def __init__(self, config: CpmAntConfig):
        super().__init__()
        self.self_att = CpmAntSelfAttentionBlock(config)
        self.ffn = CpmAntFFNBlock(config)

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, use_cache: Optional[bool]=None):
        hidden_states = self.self_att(hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, past_key_values=past_key_values, use_cache=use_cache)
        (hidden_states, attn_weights, current_key_value) = hidden_states
        hidden_states = self.ffn(hidden_states)
        return (hidden_states, attn_weights, current_key_value)

class CpmAntEncoder(nn.Module):

    def __init__(self, config: CpmAntConfig):
        super().__init__()
        self.num_layers = config.num_hidden_layers
        self.layers = nn.ModuleList([CpmAntTransformerBlock(config) for ith in range(self.num_layers)])
        self.output_layernorm = CpmAntLayerNorm(config)

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: torch.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, use_cache: Optional[bool]=None):
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        current_key_values = () if use_cache else None
        for (i, layer) in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            layer_outputs = layer(hidden_states, attention_mask, position_bias, output_attentions=output_attentions, past_key_values=past_key_values[i] if past_key_values else None, use_cache=use_cache)
            (hidden_states, attn_weights, current_key_value) = layer_outputs
            if output_attentions:
                all_self_attns += (attn_weights,)
            if current_key_value is not None:
                current_key_values = current_key_values + (current_key_value,)
        hidden_states = self.output_layernorm(hidden_states)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        return (hidden_states, current_key_values, all_hidden_states, all_self_attns)

class CpmAntIntermediate(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

class CpmAntSegmentPositionEmbedding(nn.Module):

    def __init__(self, config: CpmAntConfig):
        super().__init__()
        self.num_heads = config.num_attention_heads
        self.num_buckets = config.position_bias_num_buckets
        self.max_distance = config.position_bias_max_distance
        self.num_segments = config.segment_types
        self.relative_attention_bias = nn.Parameter(torch.empty(config.segment_types * config.segment_types + config.position_bias_num_buckets, config.num_attention_heads))

    def forward(self, key_pos: torch.Tensor, query_pos: torch.Tensor, key_segment: torch.Tensor, query_segment: torch.Tensor):
        with torch.no_grad():
            batch = key_pos.size(0)
            keylen = key_pos.size(1)
            querylen = query_pos.size(1)
            if key_pos.size(0) != query_pos.size(0):
                raise AssertionError(f'key_pos.size(0) should be equal to query_pos.size(0), but got {key_pos.size(0)} and {query_pos.size(0)}!')
            if keylen != key_segment.size(1):
                raise AssertionError(f'keylen should be equal to key_segment.size(1), but got {keylen} and {key_segment.size(1)}!')
            if querylen != query_segment.size(1):
                raise AssertionError(f'querylen should be equal to query_segment.size(1), but got {querylen} and {query_segment.size(1)}!')
            key_pos = key_pos.view(batch, -1, keylen)
            query_pos = query_pos.view(batch, querylen, -1)
            key_segment = key_segment.view(batch, -1, keylen)
            query_segment = query_segment.view(batch, querylen, -1)
            relative_position_bucket = self._segment_relative_position_bucket(query_segment, key_segment)
            relative_position_bucket = relative_position_bucket + self.num_buckets
            absolute_position_bucket = self._position_bucket(torch.arange(keylen, dtype=torch.int32, device=relative_position_bucket.device)[None, :] - torch.arange(querylen, dtype=torch.int32, device=relative_position_bucket.device)[:, None], num_buckets=self.num_buckets, max_distance=self.max_distance)
            relative_position_bucket = torch.where(key_segment == query_segment, absolute_position_bucket[None, :, :], relative_position_bucket)
        embeds = F.embedding(relative_position_bucket, self.relative_attention_bias)
        embeds = embeds.permute(0, 3, 1, 2).contiguous()
        return embeds

    def _segment_relative_position_bucket(self, query_segment, key_segment):
        return query_segment * self.num_segments + key_segment

    def _position_bucket(self, relative_position, num_buckets=32, max_distance=128):
        relative_buckets = 0
        num_buckets //= 2
        relative_buckets = (relative_position > 0).to(torch.int32) * num_buckets
        relative_position = torch.abs(relative_position)
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        relative_position_if_large = max_exact + (torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.int32)
        relative_position_if_large = torch.min(relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1))
        relative_buckets += torch.where(is_small, relative_position.to(torch.int32), relative_position_if_large)
        return relative_buckets
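
# Illustrative note (not part of the original file): the bucketing above is the
# T5-style scheme, where small offsets keep exact buckets, large offsets share
# coarser log-spaced buckets, and positive offsets (key after query) occupy the
# upper half of the bucket range. A quick way to inspect it, assuming the
# method defaults (num_buckets=32, max_distance=128):
#
#     emb = CpmAntSegmentPositionEmbedding(CpmAntConfig())
#     print(emb._position_bucket(torch.tensor([-64, -2, 0, 2, 64]),
#                                num_buckets=32, max_distance=128))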

class CpmAntOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

class CpmAntPreTrainedModel(PreTrainedModel):
    config_class = CpmAntConfig
    base_model_prefix = 'cpmant'

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.init_std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.init_std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, CpmAntLayerNorm):
            module.weight.data.fill_(1.0)
        elif isinstance(module, CpmAntSegmentPositionEmbedding):
            module.relative_attention_bias.data.normal_(mean=0.0, std=self.config.init_std)
CPMANT_START_DOCSTRING = '\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`~CpmAntConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
CPMANT_INPUTS_DOCSTRING = '\n    Args:\n        input_ids (`torch.Tensor` of shape `(batch_size, seq_len)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`CPMAntTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n        use_cache (`bool`, *optional*):\n            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n            `past_key_values`).\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'

@add_start_docstrings('The bare CPMAnt Model outputting raw hidden-states without any specific head on top.', CPMANT_START_DOCSTRING)
class CpmAntModel(CpmAntPreTrainedModel):

    def __init__(self, config: CpmAntConfig):
        super().__init__(config)
        self.encoder = CpmAntEncoder(config)
        self.segment_embedding = nn.Embedding(config.segment_types, config.hidden_size)
        self.input_embedding = nn.Embedding(config.vocab_size + config.prompt_types * config.prompt_length, config.hidden_size)
        self.position_bias = CpmAntSegmentPositionEmbedding(config)
        self.prompt_length = config.prompt_length
        self.vocab_size = config.vocab_size
        self.post_init()

    def get_input_embeddings(self):
        return self.input_embedding

    def set_input_embeddings(self, embeddings, **kwargs):
        self.input_embedding = embeddings

    def _prepare_attention_mask(self, input_ids, span, context, length):
        batch = input_ids.size(0)
        seqlen = input_ids.size(1)
        device = input_ids.device
        directional_mask_2d = torch.arange(seqlen, device=device) <= torch.arange(seqlen, device=device).view(-1, 1)
        attention_mask = context[:, None, :] | context[:, :, None].logical_not() & directional_mask_2d.view(1, seqlen, seqlen)
        attention_mask = attention_mask & (span[:, None, :] == span[:, :, None])
        mask_1d = torch.tensor(list(range(seqlen - self.prompt_length))[::-1], device=device)[None, :].repeat(batch, 1) < length[:, None]
        mask_1d = torch.cat((torch.ones(batch, self.prompt_length, device=device).bool(), mask_1d), dim=1)
        attention_mask = mask_1d.view(batch, seqlen, 1) & mask_1d.view(batch, 1, seqlen) & attention_mask
        return attention_mask

    @add_start_docstrings_to_model_forward(CPMANT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        if input_ids.dtype != torch.int32:
            input_ids = input_ids.to(torch.int32)
        (dtype, device) = (input_ids.dtype, input_ids.device)
        segment = torch.where(input_ids != 0, 2, 0).to(dtype=dtype, device=device)
        length = (segment != 0).sum(-1).to(dtype=dtype, device=device)
        input_ids = torch.cat((torch.arange(self.prompt_length * 2 + self.vocab_size, self.prompt_length * 3 + self.vocab_size, dtype=dtype, device=device).repeat(input_ids.size(0), 1), input_ids), dim=1)
        (batch, seq_length) = input_ids.size()
        segment = torch.cat((torch.zeros(batch, self.prompt_length, dtype=dtype, device=device), segment), dim=1)
        context = torch.full((batch, seq_length), 1, dtype=dtype, device=device)
        position = torch.arange(seq_length, dtype=dtype, device=device).repeat(batch, 1)
        span = torch.full((batch, seq_length), 0, dtype=dtype, device=device)
        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * self.encoder.num_layers)
            input_ids = input_ids.contiguous()
            hidden_states = self.input_embedding(input_ids)
            segment_states = self.segment_embedding(segment)
            hidden_states = hidden_states + segment_states
        else:
            past_length = past_key_values[0][0].size(-2)
            segment_states = self.segment_embedding(segment)
            hidden_states = self.input_embedding(input_ids) + segment_states[:, -1:, :]
        attention_mask = self._prepare_attention_mask(input_ids, span, context, length)
        position_bias = self.position_bias(position, position, segment, segment)
        attention_mask = attention_mask[:, past_length:, :]
        position_bias = position_bias[:, :, past_length:, :]
        hidden_states = hidden_states[:, past_length:, :]
        (hidden_states, present_key_values, all_hidden_states, all_attentions) = self.encoder(hidden_states, attention_mask, position_bias, output_attentions, output_hidden_states, past_key_values, use_cache)
        if past_length == 0:
            hidden_states = hidden_states[:, self.prompt_length:, :]
            if all_attentions is not None:
                new_attentions = ()
                for attention in all_attentions:
                    new_attentions += (attention[:, :, self.prompt_length:, self.prompt_length:],)
                all_attentions = new_attentions
            if all_hidden_states is not None:
                new_hidden_states = ()
                for hidden_state in all_hidden_states:
                    new_hidden_states += (hidden_state[:, self.prompt_length:, :],)
                all_hidden_states = new_hidden_states
        if not return_dict:
            return tuple((v for v in [hidden_states, present_key_values, all_hidden_states, all_attentions] if v is not None))
        return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_attentions)

@add_start_docstrings('\n    The CPMAnt Model with a language modeling head on top (linear layer with weights tied to the input embeddings).\n    ', CPMANT_START_DOCSTRING)
class CpmAntForCausalLM(CpmAntPreTrainedModel):
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config: CpmAntConfig):
        super().__init__(config)
        self.cpmant = CpmAntModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size + config.prompt_types * config.prompt_length, bias=False)
        self.post_init()

    @add_start_docstrings_to_model_forward(CPMANT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        model_output = self.cpmant(input_ids, output_attentions, output_hidden_states, past_key_values, use_cache, return_dict)
        hidden_states = model_output.last_hidden_state if return_dict else model_output[0]
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            loss_func = CrossEntropyLoss()
            loss = loss_func(logits.view(-1, logits.size(-1)), labels.view(-1))
        if not return_dict:
            output = (logits,) + model_output[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=model_output.past_key_values, hidden_states=model_output.hidden_states, attentions=model_output.attentions)

    def get_input_embeddings(self):
        return self.cpmant.input_embedding

    def set_input_embeddings(self, embeddings):
        self.cpmant.input_embedding = embeddings

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        input_ids = input_ids.int()
        if 'attention_mask' in kwargs:
            kwargs['attention_mask'] = torch.zeros(1, 1)
        return {'input_ids': input_ids, 'use_cache': kwargs['use_cache'], 'past_key_values': kwargs.get('past_key_values', None)}

    def _reorder_cache(self, past_key_values, beam_idx):
        past_key_values = [list(each) if each is not None else each for each in past_key_values]
        for key_value_layer in past_key_values:
            key_value_layer[0] = key_value_layer[0][beam_idx]
            key_value_layer[1] = key_value_layer[1][beam_idx]
        return past_key_values
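
# Illustrative usage sketch (not part of the original file). The checkpoint id
# below is assumed to be the public CPM-Ant weights on the Hub; swap in your
# own path if it differs. The tokenizer additionally requires `jieba`.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("openbmb/cpm-ant-10b")
    model = CpmAntForCausalLM.from_pretrained("openbmb/cpm-ant-10b")
    inputs = tokenizer("今天天气真好，", return_tensors="pt")
    # greedy continuation, reusing the past key/value cache
    output_ids = model.generate(**inputs, max_new_tokens=20, use_cache=True)
    print(tokenizer.decode(output_ids[0]))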

# File: transformers-main/src/transformers/models/cpmant/tokenization_cpmant.py
""""""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
    import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

def load_vocab(vocab_file):
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for (index, token) in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab

class WordpieceTokenizer:

    def __init__(self, vocab, unk_token='<unk>', max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
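
# Illustrative behaviour (toy vocabulary, not part of the original file):
# greedy longest-match from the left, falling back to `unk_token` per character.
#
#     wp = WordpieceTokenizer(vocab={"play": 0, "ed": 1}, unk_token="<unk>")
#     wp.tokenize("played")   # -> ["play", "ed"]
#     wp.tokenize("xplayed")  # -> ["<unk>", "play", "ed"]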

class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token='<d>', eod_token='</d>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<unk>', line_token='</n>', space_token='</_>', padding_side='left', **kwargs):
        requires_backends(self, ['jieba'])
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=unk_token)
        super().__init__(bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder['\n']

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and (x != self.bos_token_id)]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return ''.join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if ' ' in self.encoder:
            self.encoder['</_>'] = self.encoder[' ']
            del self.encoder[' ']
        if '\n' in self.encoder:
            self.encoder['</n>'] = self.encoder['\n']
            del self.encoder['\n']
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in self.encoder.items():
                if index != token_index:
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int]=None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)
        return [1] + [0] * len(token_ids_0)
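
# Illustrative usage sketch (not part of the original file), assuming the
# public "openbmb/cpm-ant-10b" checkpoint; any directory containing a CPM-Ant
# `vocab.txt` also works. `jieba` must be installed for `_tokenize`.
if __name__ == "__main__":
    tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
    encoding = tokenizer("你好，世界")
    print(encoding["input_ids"])
    print(tokenizer.decode(encoding["input_ids"]))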

# File: transformers-main/src/transformers/models/ctrl/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'configuration_ctrl': ['CTRLConfig'], 'tokenization_ctrl': ['CTRLTokenizer']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_ctrl'] = ['CTRLForSequenceClassification', 'CTRLLMHeadModel', 'CTRLModel', 'CTRLPreTrainedModel']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_ctrl'] = ['TFCTRLForSequenceClassification', 'TFCTRLLMHeadModel', 'TFCTRLModel', 'TFCTRLPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_ctrl import CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/ctrl/configuration_ctrl.py
""""""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class CTRLConfig(PretrainedConfig):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}

    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-06, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)

# File: transformers-main/src/transformers/models/ctrl/modeling_ctrl.py
""""""
from typing import Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_ctrl import CTRLConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'CTRLConfig'

def angle_defn(pos, i, d_model_size):
    angle_rates = 1 / torch.pow(10000, 2 * (i // 2) / d_model_size)
    return pos * angle_rates

def positional_encoding(position, d_model_size, dtype):
    angle_rads = angle_defn(torch.arange(position, dtype=torch.int64).to(dtype).unsqueeze(1), torch.arange(d_model_size, dtype=torch.int64).to(dtype).unsqueeze(0), d_model_size)
    sines = torch.sin(angle_rads[:, 0::2])
    cosines = torch.cos(angle_rads[:, 1::2])
    pos_encoding = torch.cat([sines, cosines], dim=-1)
    return pos_encoding
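
# Quick illustration (not part of the original file): positional_encoding(5, 8, torch.float)
# returns a (5, 8) tensor; within each row the first four entries are the sine
# components and the last four are the cosine components of the angle table.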

def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
    matmul_qk = torch.matmul(q, k.permute(0, 1, 3, 2))
    dk = k.shape[-1]
    scaled_attention_logits = matmul_qk / np.sqrt(dk)
    if mask is not None:
        (nd, ns) = (scaled_attention_logits.size(-2), scaled_attention_logits.size(-1))
        scaled_attention_logits += mask[ns - nd:ns, :ns] * -10000.0
    if attention_mask is not None:
        scaled_attention_logits = scaled_attention_logits + attention_mask
    attention_weights = torch.softmax(scaled_attention_logits, dim=-1)
    if head_mask is not None:
        attention_weights = attention_weights * head_mask
    output = torch.matmul(attention_weights, v)
    return (output, attention_weights)
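
# Illustrative shape check (not part of the original file): with batch 1,
# 2 heads, 4 positions, head size 8 and an upper-triangular causal mask,
#
#     q = k = v = torch.rand(1, 2, 4, 8)
#     causal_mask = torch.triu(torch.ones(4, 4), 1)
#     context, weights = scaled_dot_product_attention(q, k, v, causal_mask)
#
# `context` has shape (1, 2, 4, 8) and `weights` has shape (1, 2, 4, 4).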

class MultiHeadAttention(nn.Module):

    def __init__(self, d_model_size, num_heads):
        super().__init__()
        self.num_heads = num_heads
        self.d_model_size = d_model_size
        self.depth = int(d_model_size / self.num_heads)
        self.Wq = nn.Linear(d_model_size, d_model_size)
        self.Wk = nn.Linear(d_model_size, d_model_size)
        self.Wv = nn.Linear(d_model_size, d_model_size)
        self.dense = nn.Linear(d_model_size, d_model_size)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        attention_head_size = self.d_model_size // self.num_heads
        if len(heads) == 0:
            return
        (heads, index) = find_pruneable_heads_and_indices(heads, self.num_heads, attention_head_size, self.pruned_heads)
        self.Wq = prune_linear_layer(self.Wq, index)
        self.Wk = prune_linear_layer(self.Wk, index)
        self.Wv = prune_linear_layer(self.Wv, index)
        self.dense = prune_linear_layer(self.dense, index, dim=1)
        self.num_heads = self.num_heads - len(heads)
        self.d_model_size = attention_head_size * self.num_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def split_into_heads(self, x, batch_size):
        x = x.reshape(batch_size, -1, self.num_heads, self.depth)
        return x.permute([0, 2, 1, 3])

    def forward(self, v, k, q, mask, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False):
        batch_size = q.shape[0]
        q = self.Wq(q)
        k = self.Wk(k)
        v = self.Wv(v)
        q = self.split_into_heads(q, batch_size)
        k = self.split_into_heads(k, batch_size)
        v = self.split_into_heads(v, batch_size)
        if layer_past is not None:
            (past_key, past_value) = (layer_past[0], layer_past[1])
            k = torch.cat((past_key, k), dim=-2)
            v = torch.cat((past_value, v), dim=-2)
        if use_cache is True:
            present = torch.stack((k, v))
        else:
            present = (None,)
        output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
        scaled_attention = output[0].permute([0, 2, 1, 3])
        attn = output[1]
        original_size_attention = scaled_attention.reshape(batch_size, -1, self.d_model_size)
        output = self.dense(original_size_attention)
        outputs = (output, present)
        if output_attentions:
            outputs = outputs + (attn,)
        return outputs

def point_wise_feed_forward_network(d_model_size, dff):
    return nn.Sequential(nn.Linear(d_model_size, dff), nn.ReLU(), nn.Linear(dff, d_model_size))

class EncoderLayer(nn.Module):

    def __init__(self, d_model_size, num_heads, dff, rate=0.1):
        super().__init__()
        self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model_size, dff)
        self.layernorm1 = nn.LayerNorm(d_model_size, eps=1e-06)
        self.layernorm2 = nn.LayerNorm(d_model_size, eps=1e-06)
        self.dropout1 = nn.Dropout(rate)
        self.dropout2 = nn.Dropout(rate)

    def forward(self, x, mask, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False):
        normed = self.layernorm1(x)
        attn_outputs = self.multi_head_attention(normed, normed, normed, mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions)
        attn_output = attn_outputs[0]
        attn_output = self.dropout1(attn_output)
        out1 = x + attn_output
        out2 = self.layernorm2(out1)
        ffn_output = self.ffn(out2)
        ffn_output = self.dropout2(ffn_output)
        out2 = out1 + ffn_output
        outputs = (out2,) + attn_outputs[1:]
        return outputs

class CTRLPreTrainedModel(PreTrainedModel):
    config_class = CTRLConfig
    base_model_prefix = 'transformer'

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, Conv1D)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
CTRL_START_DOCSTRING = '\n\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`CTRLConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
CTRL_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]`\n            (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.\n\n            If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as\n            `input_ids`.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n            [`PreTrainedTokenizer.encode`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        past_key_values (`Tuple[Tuple[torch.FloatTensor]]` of length `config.n_layers`):\n            Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n            `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have\n            their past given to this model should not be passed as input ids as they have already been computed.\n        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n            1]`:\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.max_position_embeddings - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        use_cache (`bool`, *optional*):\n            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n            `past_key_values`).\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"

@add_start_docstrings('The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.', CTRL_START_DOCSTRING)
class CTRLModel(CTRLPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.d_model_size = config.n_embd
        self.num_layers = config.n_layer
        self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size, torch.float)
        self.w = nn.Embedding(config.vocab_size, config.n_embd)
        self.dropout = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([EncoderLayer(config.n_embd, config.n_head, config.dff, config.resid_pdrop) for _ in range(config.n_layer)])
        self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.post_init()

    def get_input_embeddings(self):
        return self.w

    def set_input_embeddings(self, new_embeddings):
        self.w = new_embeddings

    def _prune_heads(self, heads_to_prune):
        for (layer, heads) in heads_to_prune.items():
            self.h[layer].multi_head_attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError('batch_size has to be defined and > 0')
            attention_mask = attention_mask.view(batch_size, -1)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = attention_mask.to(dtype=self.dtype)
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
            token_type_embeds = self.w(token_type_ids)
            token_type_embeds *= np.sqrt(self.d_model_size)
        else:
            token_type_embeds = 0
        if inputs_embeds is None:
            inputs_embeds = self.w(input_ids)
        seq_len = input_shape[-1]
        mask = torch.triu(torch.ones(seq_len + past_length, seq_len + past_length), 1).to(device)
        inputs_embeds *= np.sqrt(self.d_model_size)
        self.pos_encoding = self.pos_encoding.to(device)
        pos_embeds = self.pos_encoding[position_ids, :]
        hidden_states = inputs_embeds + pos_embeds + token_type_embeds
        hidden_states = self.dropout(hidden_states)
        presents = () if use_cache else None
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for (i, (h, layer_past)) in enumerate(zip(self.h, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            outputs = h(hidden_states, mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions)
            (hidden_states, present) = outputs[:2]
            if use_cache is True:
                presents = presents + (present,)
            if output_attentions:
                all_attentions += (outputs[2],)
        hidden_states = self.layernorm(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None))
        return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_attentions)

@add_start_docstrings('\n    The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input\n    embeddings).\n    ', CTRL_START_DOCSTRING)
class CTRLLMHeadModel(CTRLPreTrainedModel):
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config):
        super().__init__(config)
        self.transformer = CTRLModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True)
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_cache=None, **kwargs):
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                remove_prefix_length = input_ids.shape[1] - 1
            input_ids = input_ids[:, remove_prefix_length:]
        return {'input_ids': input_ids, 'past_key_values': past_key_values, 'use_cache': use_cache}

    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)

    @staticmethod
    def _reorder_cache(past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
        return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past_key_values))
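
# Illustrative usage sketch (not part of the original file): CTRL steers
# generation with a leading control code (e.g. "Wikipedia", "Links", "Books").
# Uses the public "Salesforce/ctrl" checkpoint; substitute your own if needed.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
    model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl")
    inputs = tokenizer("Wikipedia The Eiffel Tower is", return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=20, use_cache=True)
    print(tokenizer.decode(output_ids[0]))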

@add_start_docstrings('\n    The CTRL Model transformer with a sequence classification head on top (linear layer).\n    [`CTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n    (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last\n    token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in\n    each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot\n    guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last\n    value in each row of the batch).\n    ', CTRL_START_DOCSTRING)
class CTRLForSequenceClassification(CTRLPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = CTRLModel(config)
        self.classifier = nn.Linear(config.n_embd, self.num_labels, bias=False)
        self.post_init()

    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = transformer_outputs[0]
        logits = self.classifier(hidden_states)
        if input_ids is not None:
            (batch_size, sequence_length) = input_ids.shape[:2]
        else:
            (batch_size, sequence_length) = inputs_embeds.shape[:2]
        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        elif input_ids is not None:
            sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
            sequence_lengths = sequence_lengths % input_ids.shape[-1]
            sequence_lengths = sequence_lengths.to(logits.device)
        else:
            sequence_lengths = -1
            logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`')
        pooled_logits = logits[range(batch_size), sequence_lengths]
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[2:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=pooled_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
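
# Illustrative sketch (not part of the original file, tokenizer as in the
# sketch above): the classification head is randomly initialized when starting
# from the base LM checkpoint, so this only demonstrates the API and the
# last-token pooling described in the class docstring. With batched, padded
# inputs, `config.pad_token_id` must be set so the last non-padding token can
# be located.
#
#     model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl", num_labels=2)
#     inputs = tokenizer("Reviews This movie was surprisingly good.", return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, 2)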

# File: transformers-main/src/transformers/models/ctrl/modeling_tf_ctrl.py
""""""
from __future__ import annotations
from typing import Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast, TFSequenceClassifierOutput
from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_ctrl import CTRLConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = 'Salesforce/ctrl'
_CONFIG_FOR_DOC = 'CTRLConfig'

def angle_defn(pos, i, d_model_size):
    angle_rates = 1 / np.power(10000, 2 * (i // 2) / d_model_size)
    return pos * angle_rates

def positional_encoding(position, d_model_size):
    angle_rads = angle_defn(np.arange(position)[:, np.newaxis], np.arange(d_model_size)[np.newaxis, :], d_model_size)
    sines = np.sin(angle_rads[:, 0::2])
    cosines = np.cos(angle_rads[:, 1::2])
    pos_encoding = tf.convert_to_tensor(np.concatenate([sines, cosines], axis=-1))
    return pos_encoding

def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
    matmul_qk = tf.matmul(q, k, transpose_b=True)
    dk = tf.cast(shape_list(k)[-1], dtype=matmul_qk.dtype)
    scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
    if mask is not None:
        scaled_attention_logits += tf.cast(mask * -10000.0, dtype=scaled_attention_logits.dtype)
    if attention_mask is not None:
        attention_mask = tf.cast(attention_mask, dtype=scaled_attention_logits.dtype)
        scaled_attention_logits = scaled_attention_logits + attention_mask
    attention_weights = stable_softmax(scaled_attention_logits, axis=-1)
    if head_mask is not None:
        attention_weights = attention_weights * head_mask
    output = tf.matmul(attention_weights, v)
    return (output, attention_weights)

class TFMultiHeadAttention(keras.layers.Layer):

    def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs):
        super().__init__(**kwargs)
        self.num_heads = num_heads
        self.d_model_size = d_model_size
        self.output_attentions = output_attentions
        self.depth = int(d_model_size / self.num_heads)
        self.Wq = keras.layers.Dense(d_model_size, name='Wq')
        self.Wk = keras.layers.Dense(d_model_size, name='Wk')
        self.Wv = keras.layers.Dense(d_model_size, name='Wv')
        self.dense = keras.layers.Dense(d_model_size, name='dense')

    def split_into_heads(self, x, batch_size):
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):
        batch_size = shape_list(q)[0]
        q = self.Wq(q)
        k = self.Wk(k)
        v = self.Wv(v)
        q = self.split_into_heads(q, batch_size)
        k = self.split_into_heads(k, batch_size)
        v = self.split_into_heads(v, batch_size)
        if layer_past is not None:
            (past_key, past_value) = tf.unstack(layer_past, axis=0)
            k = tf.concat((past_key, k), axis=-2)
            v = tf.concat((past_value, v), axis=-2)
        if use_cache:
            present = tf.stack((k, v), axis=0)
        else:
            present = (None,)
        output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
        scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3])
        attn = output[1]
        original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size))
        output = self.dense(original_size_attention)
        outputs = (output, present)
        if output_attentions:
            outputs = outputs + (attn,)
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'Wq', None) is not None:
            with tf.name_scope(self.Wq.name):
                self.Wq.build([None, None, self.d_model_size])
        if getattr(self, 'Wk', None) is not None:
            with tf.name_scope(self.Wk.name):
                self.Wk.build([None, None, self.d_model_size])
        if getattr(self, 'Wv', None) is not None:
            with tf.name_scope(self.Wv.name):
                self.Wv.build([None, None, self.d_model_size])
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.d_model_size])

class TFPointWiseFeedForwardLayer(keras.layers.Layer):

    def __init__(self, d_model_size, dff, **kwargs):
        super().__init__(**kwargs)
        self.dense_0 = keras.layers.Dense(dff, activation='relu', name='0')
        self.dense_2 = keras.layers.Dense(d_model_size, name='2')
        self.d_model_size = d_model_size
        self.dff = dff

    def call(self, inputs, trainable=False):
        dense_0_output = self.dense_0(inputs)
        dense_2_output = self.dense_2(dense_0_output)
        return dense_2_output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense_0', None) is not None:
            with tf.name_scope(self.dense_0.name):
                self.dense_0.build([None, None, self.d_model_size])
        if getattr(self, 'dense_2', None) is not None:
            with tf.name_scope(self.dense_2.name):
                self.dense_2.build([None, None, self.dff])

class TFEncoderLayer(keras.layers.Layer):

    def __init__(self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-06, output_attentions=False, **kwargs):
        super().__init__(**kwargs)
        self.output_attentions = output_attentions
        self.multi_head_attention = TFMultiHeadAttention(d_model_size, num_heads, output_attentions=self.output_attentions, name='multi_head_attention')
        self.ffn = TFPointWiseFeedForwardLayer(d_model_size, dff, name='ffn')
        self.layernorm1 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name='layernorm1')
        self.layernorm2 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name='layernorm2')
        self.dropout1 = keras.layers.Dropout(rate)
        self.dropout2 = keras.layers.Dropout(rate)
        self.d_model_size = d_model_size

    def call(self, x, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):
        normed = self.layernorm1(x)
        attn_outputs = self.multi_head_attention(normed, normed, normed, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=training)
        attn_output = attn_outputs[0]
        attn_output = self.dropout1(attn_output, training=training)
        out1 = x + attn_output
        out2 = self.layernorm2(out1)
        ffn_output = self.ffn(out2)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = out1 + ffn_output
        outputs = (out2,) + attn_outputs[1:]
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'multi_head_attention', None) is not None:
            with tf.name_scope(self.multi_head_attention.name):
                self.multi_head_attention.build(None)
        if getattr(self, 'ffn', None) is not None:
            with tf.name_scope(self.ffn.name):
                self.ffn.build(None)
        if getattr(self, 'layernorm1', None) is not None:
            with tf.name_scope(self.layernorm1.name):
                self.layernorm1.build([None, None, self.d_model_size])
        if getattr(self, 'layernorm2', None) is not None:
            with tf.name_scope(self.layernorm2.name):
                self.layernorm2.build([None, None, self.d_model_size])

@keras_serializable
class TFCTRLMainLayer(keras.layers.Layer):
    config_class = CTRLConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.use_cache = config.use_cache
        self.return_dict = config.use_return_dict
        self.d_model_size = config.n_embd
        self.num_layers = config.n_layer
        self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size)
        self.w = keras.layers.Embedding(input_dim=config.vocab_size, output_dim=config.n_embd, embeddings_initializer=get_initializer(config.initializer_range), name='w')
        self.dropout = keras.layers.Dropout(config.embd_pdrop)
        self.h = [TFEncoderLayer(config.n_embd, config.n_head, config.dff, config.resid_pdrop, config.layer_norm_epsilon, self.output_attentions, name=f'h_._{i}') for i in range(config.n_layer)]
        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='layernorm')

    def get_input_embeddings(self):
        return self.w

    def set_input_embeddings(self, new_embeddings):
        self.w = new_embeddings

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @unpack_inputs
    def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[Tuple, TFBaseModelOutputWithPast]:
        if past_key_values is not None:
            if input_ids is not None:
                input_ids = input_ids[:, -1:]
            if inputs_embeds is not None:
                inputs_embeds = inputs_embeds[:, -1:]
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1:]
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
            input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        if past_key_values is None:
            past_length = 0
            past_key_values = [None] * len(self.h)
        else:
            past_length = shape_list(past_key_values[0][0])[-2]
        if position_ids is None:
            position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32), axis=0)
            position_ids = tf.tile(position_ids, [input_shape[0], 1])
        if attention_mask is not None:
            attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1] + past_length))
            one_cst = tf.constant(1.0)
            ten_thousand_cst = tf.constant(-10000.0)
            attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
            attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), ten_thousand_cst)
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.num_layers
        if token_type_ids is not None:
            token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
            token_type_embeds = self.w(token_type_ids)
            token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, dtype=token_type_embeds.dtype))
        else:
            token_type_embeds = tf.constant(0.0)
        position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
        if inputs_embeds is None:
            check_embeddings_within_bounds(input_ids, self.w.input_dim)
            inputs_embeds = self.w(input_ids)
        seq_len = input_shape[-1]
        mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
        inputs_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, inputs_embeds.dtype))
        pos_embeds = tf.gather(self.pos_encoding, position_ids)
        pos_embeds = tf.cast(pos_embeds, dtype=token_type_embeds.dtype)
        hidden_states = inputs_embeds + pos_embeds + token_type_embeds
        hidden_states = self.dropout(hidden_states, training=training)
        output_shape = input_shape + [shape_list(hidden_states)[-1]]
        presents = () if use_cache else None
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for (i, (h, layer_past)) in enumerate(zip(self.h, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
            outputs = h(hidden_states, mask, layer_past, attention_mask, head_mask[i], use_cache, output_attentions, training=training)
            (hidden_states, present) = outputs[:2]
            if use_cache:
                presents = presents + (present,)
            if output_attentions:
                all_attentions = all_attentions + (outputs[2],)
        hidden_states = self.layernorm(hidden_states)
        hidden_states = tf.reshape(hidden_states, output_shape)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if output_attentions:
            attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
            all_attentions = tuple((tf.reshape(t, attention_output_shape) for t in all_attentions))
        if not return_dict:
            return tuple((v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None))
        return TFBaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'w', None) is not None:
            with tf.name_scope(self.w.name):
                self.w.build(None)
        if getattr(self, 'layernorm', None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, self.config.n_embd])
        if getattr(self, 'h', None) is not None:
            for layer in self.h:
                with tf.name_scope(layer.name):
                    layer.build(None)
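
# Small illustrative sketch (a hypothetical helper, not part of the original module) of the two masks
# built in TFCTRLMainLayer.call: the 0/1 padding mask becomes an additive bias of 0 / -10000, and the
# causal mask keeps only the strictly upper-triangular entries of an all-ones matrix.
def _example_ctrl_masks():
    padding_mask = tf.constant([[1.0, 1.0, 0.0]])
    additive_bias = (1.0 - padding_mask) * -10000.0  # [[0., 0., -10000.]]
    causal_mask = 1 - tf.linalg.band_part(tf.ones((3, 3)), -1, 0)  # ones above the diagonal
    return additive_bias, causal_mask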

class TFCTRLPreTrainedModel(TFPreTrainedModel):
    config_class = CTRLConfig
    base_model_prefix = 'transformer'
CTRL_START_DOCSTRING = '\n\n    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n    behavior.\n\n    \n\n    TensorFlow models and layers in `transformers` accept two formats as input:\n\n    - having all inputs as keyword arguments (like PyTorch models), or\n    - having all inputs as a list, tuple or dict in the first positional argument.\n\n    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n    positional argument:\n\n    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n    - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n    Note that when creating models and layers with\n    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n    about any of this, as you can just pass inputs like you would to any other Python function!\n\n    \n\n    Parameters:\n        config ([`CTRLConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
CTRL_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):\n            `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of\n            input past key value states).\n\n            Indices of input sequence tokens in the vocabulary.\n\n            If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n            [`PreTrainedTokenizer.encode`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        past (`List[tf.Tensor]` of length `config.n_layers`):\n            Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n            `past` output below). Can be used to speed up sequential decoding. The token ids which have their past\n            given to this model should not be passed as input ids as they have already been computed.\n        attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n            1]`:\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.max_position_embeddings - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        use_cache (`bool`, *optional*):\n            If set to `True`, `past` key value states are returned and can be used to speed up decoding (see `past`).\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the\n            config will be used instead.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n            used instead.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n            eager mode, in graph mode the value will always be set to True.\n        training (`bool`, *optional*, defaults to `False`):\n            Whether or not to use the model in training mode (some modules like dropout modules have different\n            behaviors between training and evaluation).\n"

@add_start_docstrings('The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.', CTRL_START_DOCSTRING)
class TFCTRLModel(TFCTRLPreTrainedModel):

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFCTRLMainLayer(config, name='transformer')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[Tuple, TFBaseModelOutputWithPast]:
        outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'transformer', None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
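
# Hedged usage sketch (a hypothetical helper; assumes the public 'Salesforce/ctrl' checkpoint is
# reachable). It only illustrates the call signature documented above and is not part of the file.
def _example_tf_ctrl_model():
    from transformers import AutoTokenizer, TFCTRLModel
    tokenizer = AutoTokenizer.from_pretrained('Salesforce/ctrl')
    model = TFCTRLModel.from_pretrained('Salesforce/ctrl')
    inputs = tokenizer('Opinion My dog is cute', return_tensors='tf')
    outputs = model(**inputs)
    return outputs.last_hidden_state  # (batch_size, sequence_length, n_embd)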

class TFCTRLBiasLayer(keras.layers.Layer):

    def __init__(self, shape, initializer, trainable, name, **kwargs):
        super().__init__(name=name, **kwargs)
        self.shape = shape
        self.initializer = initializer
        self.trainable = trainable

    def build(self, input_shape):
        self.bias = self.add_weight(name='bias', shape=self.shape, initializer=self.initializer, trainable=self.trainable)
        super().build(input_shape)

    def call(self, x):
        return x + self.bias

@add_start_docstrings('\n    The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input\n    embeddings).\n    ', CTRL_START_DOCSTRING)
class TFCTRLLMHeadModel(TFCTRLPreTrainedModel, TFCausalLanguageModelingLoss):

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFCTRLMainLayer(config, name='transformer')
        self.bias_layer = TFCTRLBiasLayer(name='lm_head', shape=[1, config.vocab_size], initializer='zeros', trainable=True)

    def get_output_embeddings(self):
        return self.get_input_embeddings()

    def set_output_embeddings(self, value):
        self.set_input_embeddings(value)

    def get_bias(self):
        return {'lm_head.bias': self.bias_layer.bias}

    def set_bias(self, value):
        vocab_size = value['lm_head.bias'].shape[-1]
        self.bias_layer = TFCTRLBiasLayer(name='final_logits_bias', shape=[1, vocab_size], initializer='zeros', trainable=True)
        self.bias_layer.build(None)
        self.bias_layer.bias.assign(value['lm_head.bias'])

    def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
        token_type_ids = kwargs.get('token_type_ids', None)
        if past_key_values:
            inputs = tf.expand_dims(inputs[:, -1], -1)
            if token_type_ids is not None:
                token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)
        position_ids = kwargs.get('position_ids', None)
        attention_mask = kwargs.get('attention_mask', None)
        if attention_mask is not None and position_ids is None:
            position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
            if past_key_values:
                position_ids = tf.expand_dims(position_ids[:, -1], -1)
        return {'input_ids': inputs, 'attention_mask': attention_mask, 'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'token_type_ids': token_type_ids}

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple, TFCausalLMOutputWithPast]:
        transformer_outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        hidden_states = transformer_outputs[0]
        logits = tf.matmul(hidden_states, self.transformer.w.weights, transpose_b=True)
        logits = self.bias_layer(logits)
        loss = None
        if labels is not None:
            shifted_logits = logits[:, :-1]
            labels = labels[:, 1:]
            loss = self.hf_compute_loss(labels, shifted_logits)
        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFCausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'transformer', None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
        if getattr(self, 'bias_layer', None) is not None:
            with tf.name_scope(self.bias_layer.name):
                self.bias_layer.build(None)
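
# Hedged usage sketch (a hypothetical helper; assumes the 'Salesforce/ctrl' checkpoint). When labels
# are passed, call() shifts logits and labels by one position before computing the causal-LM loss.
def _example_tf_ctrl_lm_loss():
    from transformers import AutoTokenizer, TFCTRLLMHeadModel
    tokenizer = AutoTokenizer.from_pretrained('Salesforce/ctrl')
    model = TFCTRLLMHeadModel.from_pretrained('Salesforce/ctrl')
    inputs = tokenizer('Wikipedia The otter is a semiaquatic mammal', return_tensors='tf')
    outputs = model(**inputs, labels=inputs['input_ids'])
    return outputs.loss, outputs.logits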

@add_start_docstrings('\n    The CTRL Model transformer with a sequence classification head on top (linear layer).\n\n    [`TFCTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n    (e.g. GPT-1, GPT-2) do.\n\n    Since it does classification on the last token, it requires to know the position of the last token. If a\n    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n    each row of the batch).\n    ', CTRL_START_DOCSTRING)
class TFCTRLForSequenceClassification(TFCTRLPreTrainedModel, TFSequenceClassificationLoss):

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.classifier = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier', use_bias=False)
        self.transformer = TFCTRLMainLayer(config, name='transformer')
        self.config = config

    def get_output_embeddings(self):
        logger.warning('Sequence classification models do not have output embeddings. `.get_output_embeddings` will be removed in transformers v4.32.')
        return self.transformer.w

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple, TFSequenceClassifierOutput]:
        transformer_outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        hidden_states = transformer_outputs[0]
        logits = self.classifier(hidden_states)
        in_logits = None
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        elif input_ids is not None:
            sequence_lengths = tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1) - 1
            sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
            in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
        else:
            sequence_lengths = -1
            logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`')
        loss = None
        if labels is not None:
            if input_ids is not None:
                (batch_size, sequence_length) = shape_list(input_ids)[:2]
            else:
                (batch_size, sequence_length) = shape_list(inputs_embeds)[:2]
            if self.config.pad_token_id is None and batch_size != 1:
                raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
            if not tf.is_tensor(sequence_lengths):
                in_logits = logits[0:batch_size, sequence_lengths]
            loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))
        pooled_logits = in_logits if in_logits is not None else logits
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=pooled_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.n_embd])
        if getattr(self, 'transformer', None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
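
# Hedged usage sketch (a hypothetical helper; assumes the 'Salesforce/ctrl' checkpoint). The classifier
# scores every position and the model pools the logits at the last non-padding token, or simply the
# last token when no pad_token_id is configured.
def _example_tf_ctrl_sequence_classification():
    from transformers import AutoTokenizer, TFCTRLForSequenceClassification
    tokenizer = AutoTokenizer.from_pretrained('Salesforce/ctrl')
    model = TFCTRLForSequenceClassification.from_pretrained('Salesforce/ctrl', num_labels=2)
    inputs = tokenizer('Reviews The plot was thin but the acting was great', return_tensors='tf')
    outputs = model(**inputs)
    return outputs.logits  # shape (batch_size, num_labels)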

# File: transformers-main/src/transformers/models/ctrl/tokenization_ctrl.py
""""""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
CONTROL_CODES = {'Pregnancy': 168629, 'Christianity': 7675, 'Explain': 106423, 'Fitness': 63440, 'Saving': 63163, 'Ask': 27171, 'Ass': 95985, 'Joke': 163509, 'Questions': 45622, 'Thoughts': 49605, 'Retail': 52342, 'Feminism': 164338, 'Writing': 11992, 'Atheism': 192263, 'Netflix': 48616, 'Computing': 39639, 'Opinion': 43213, 'Alone': 44967, 'Funny': 58917, 'Gaming': 40358, 'Human': 4088, 'India': 1331, 'Joker': 77138, 'Diet': 36206, 'Legal': 11859, 'Norman': 4939, 'Tip': 72689, 'Weight': 52343, 'Movies': 46273, 'Running': 23425, 'Science': 2090, 'Horror': 37793, 'Confession': 60572, 'Finance': 12250, 'Politics': 16360, 'Scary': 191985, 'Support': 12654, 'Technologies': 32516, 'Teenage': 66160, 'Event': 32769, 'Learned': 67460, 'Notion': 182770, 'Wikipedia': 37583, 'Books': 6665, 'Extract': 76050, 'Confessions': 102701, 'Conspiracy': 75932, 'Links': 63674, 'Narcissus': 150425, 'Relationship': 54766, 'Relationships': 134796, 'Reviews': 41671, 'News': 4256, 'Translation': 26820, 'multilingual': 128406}

def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
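
# For example (added for clarity): get_pairs(('h', 'e', 'l', 'l', 'o</w>')) returns
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}, i.e. the set of adjacent symbol bigrams
# that the BPE loop below repeatedly merges.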

class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs):
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
        super().__init__(unk_token=unk_token, **kwargs)

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall('\\S+\\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return (vocab_file, merge_file)
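
# Hedged usage sketch (a hypothetical helper; assumes the 'Salesforce/ctrl' vocabulary is available on
# the Hub). Sub-word splits carry a trailing '@@' marker which convert_tokens_to_string removes again.
def _example_ctrl_tokenizer_roundtrip():
    tokenizer = CTRLTokenizer.from_pretrained('Salesforce/ctrl')
    tokens = tokenizer.tokenize('Adventurous otters hold hands')
    text = tokenizer.convert_tokens_to_string(tokens)
    return tokens, text  # text matches the input up to whitespace normalization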

# File: transformers-main/src/transformers/models/cvt/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'configuration_cvt': ['CvtConfig']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_cvt'] = ['CvtForImageClassification', 'CvtModel', 'CvtPreTrainedModel']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_cvt'] = ['TFCvtForImageClassification', 'TFCvtModel', 'TFCvtPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_cvt import CvtConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cvt import CvtForImageClassification, CvtModel, CvtPreTrainedModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_cvt import TFCvtForImageClassification, TFCvtModel, TFCvtPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/cvt/configuration_cvt.py
""""""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class CvtConfig(PretrainedConfig):
    model_type = 'cvt'

    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=['dw_bn', 'dw_bn', 'dw_bn'], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
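
# Illustrative sketch (a hypothetical helper, not part of the original file): every per-stage
# hyperparameter is a list with one entry per stage, so the defaults describe a three-stage CvT-13.
def _example_cvt_config():
    config = CvtConfig()
    assert len(config.depth) == len(config.embed_dim) == len(config.num_heads) == 3
    return config.embed_dim  # [64, 192, 384]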

# File: transformers-main/src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py
""""""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification

def embeddings(idx):
    embed = []
    embed.append((f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight', f'stage{idx}.patch_embed.proj.weight'))
    embed.append((f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias', f'stage{idx}.patch_embed.proj.bias'))
    embed.append((f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight', f'stage{idx}.patch_embed.norm.weight'))
    embed.append((f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias', f'stage{idx}.patch_embed.norm.bias'))
    return embed

def attention(idx, cnt):
    attention_weights = []
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight', f'stage{idx}.blocks.{cnt}.attn.proj_q.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias', f'stage{idx}.blocks.{cnt}.attn.proj_q.bias'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight', f'stage{idx}.blocks.{cnt}.attn.proj_k.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias', f'stage{idx}.blocks.{cnt}.attn.proj_k.bias'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight', f'stage{idx}.blocks.{cnt}.attn.proj_v.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias', f'stage{idx}.blocks.{cnt}.attn.proj_v.bias'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight', f'stage{idx}.blocks.{cnt}.attn.proj.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias', f'stage{idx}.blocks.{cnt}.attn.proj.bias'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight'))
    attention_weights.append((f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias'))
    return attention_weights

def cls_token(idx):
    token = []
    token.append((f'cvt.encoder.stages.{idx}.cls_token', 'stage2.cls_token'))
    return token

def final():
    head = []
    head.append(('layernorm.weight', 'norm.weight'))
    head.append(('layernorm.bias', 'norm.bias'))
    head.append(('classifier.weight', 'head.weight'))
    head.append(('classifier.bias', 'head.bias'))
    return head

def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.loads(Path(hf_hub_download(repo_id, img_labels_file, repo_type='dataset')).read_text())
    id2label = {int(k): v for (k, v) in id2label.items()}
    label2id = {v: k for (k, v) in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    if cvt_model.rsplit('/', 1)[-1][4:6] == '13':
        config.depth = [1, 2, 10]
    elif cvt_model.rsplit('/', 1)[-1][4:6] == '21':
        config.depth = [1, 4, 16]
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cvt_model', default='cvt-w24', type=str, help="Name of the cvt model you'd like to convert.")
    parser.add_argument('--image_size', default=384, type=int, help='Input Image Size')
    parser.add_argument('--cvt_file_name', default='cvtmodels\\CvT-w24-384x384-IN-22k.pth', type=str, help='Path to the original CvT checkpoint (.pth) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.')
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
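
# Example invocation (paths and checkpoint name are placeholders, illustrative only):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-13 --image_size 384 \
#       --cvt_file_name /path/to/CvT-13-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path /path/to/output_dir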

# File: transformers-main/src/transformers/models/cvt/modeling_cvt.py
""""""
import collections.abc
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import ImageClassifierOutputWithNoAttention, ModelOutput
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import logging
from .configuration_cvt import CvtConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'CvtConfig'
_CHECKPOINT_FOR_DOC = 'microsoft/cvt-13'
_EXPECTED_OUTPUT_SHAPE = [1, 384, 14, 14]
_IMAGE_CLASS_CHECKPOINT = 'microsoft/cvt-13'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

@dataclass
class BaseModelOutputWithCLSToken(ModelOutput):
    last_hidden_state: torch.FloatTensor = None
    cls_token_value: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None

def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()
    output = input.div(keep_prob) * random_tensor
    return output
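
# Small illustrative sketch (a hypothetical helper, not part of the original file): during training each
# sample's residual branch is kept with probability 1 - drop_prob and rescaled by 1 / keep_prob so the
# expected value is unchanged; in evaluation mode the input passes through untouched.
def _example_drop_path():
    x = torch.ones(4, 3)
    eval_out = drop_path(x, drop_prob=0.5, training=False)  # identical to x
    train_out = drop_path(x, drop_prob=0.5, training=True)  # each row is all zeros or all 2.0 (= 1 / 0.5)
    return eval_out, train_out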

class CvtDropPath(nn.Module):

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return 'p={}'.format(self.drop_prob)

class CvtEmbeddings(nn.Module):

    def __init__(self, patch_size, num_channels, embed_dim, stride, padding, dropout_rate):
        super().__init__()
        self.convolution_embeddings = CvtConvEmbeddings(patch_size=patch_size, num_channels=num_channels, embed_dim=embed_dim, stride=stride, padding=padding)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, pixel_values):
        hidden_state = self.convolution_embeddings(pixel_values)
        hidden_state = self.dropout(hidden_state)
        return hidden_state

class CvtConvEmbeddings(nn.Module):

    def __init__(self, patch_size, num_channels, embed_dim, stride, padding):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        self.patch_size = patch_size
        self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=stride, padding=padding)
        self.normalization = nn.LayerNorm(embed_dim)

    def forward(self, pixel_values):
        pixel_values = self.projection(pixel_values)
        (batch_size, num_channels, height, width) = pixel_values.shape
        hidden_size = height * width
        pixel_values = pixel_values.view(batch_size, num_channels, hidden_size).permute(0, 2, 1)
        if self.normalization:
            pixel_values = self.normalization(pixel_values)
        pixel_values = pixel_values.permute(0, 2, 1).view(batch_size, num_channels, height, width)
        return pixel_values

class CvtSelfAttentionConvProjection(nn.Module):

    def __init__(self, embed_dim, kernel_size, padding, stride):
        super().__init__()
        self.convolution = nn.Conv2d(embed_dim, embed_dim, kernel_size=kernel_size, padding=padding, stride=stride, bias=False, groups=embed_dim)
        self.normalization = nn.BatchNorm2d(embed_dim)

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state

class CvtSelfAttentionLinearProjection(nn.Module):

    def forward(self, hidden_state):
        (batch_size, num_channels, height, width) = hidden_state.shape
        hidden_size = height * width
        hidden_state = hidden_state.view(batch_size, num_channels, hidden_size).permute(0, 2, 1)
        return hidden_state

class CvtSelfAttentionProjection(nn.Module):

    def __init__(self, embed_dim, kernel_size, padding, stride, projection_method='dw_bn'):
        super().__init__()
        if projection_method == 'dw_bn':
            self.convolution_projection = CvtSelfAttentionConvProjection(embed_dim, kernel_size, padding, stride)
        self.linear_projection = CvtSelfAttentionLinearProjection()

    def forward(self, hidden_state):
        hidden_state = self.convolution_projection(hidden_state)
        hidden_state = self.linear_projection(hidden_state)
        return hidden_state

class CvtSelfAttention(nn.Module):

    def __init__(self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, with_cls_token=True, **kwargs):
        super().__init__()
        self.scale = embed_dim ** (-0.5)
        self.with_cls_token = with_cls_token
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.convolution_projection_query = CvtSelfAttentionProjection(embed_dim, kernel_size, padding_q, stride_q, projection_method='linear' if qkv_projection_method == 'avg' else qkv_projection_method)
        self.convolution_projection_key = CvtSelfAttentionProjection(embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method)
        self.convolution_projection_value = CvtSelfAttentionProjection(embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method)
        self.projection_query = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.projection_key = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.projection_value = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.dropout = nn.Dropout(attention_drop_rate)

    def rearrange_for_multi_head_attention(self, hidden_state):
        (batch_size, hidden_size, _) = hidden_state.shape
        head_dim = self.embed_dim // self.num_heads
        return hidden_state.view(batch_size, hidden_size, self.num_heads, head_dim).permute(0, 2, 1, 3)

    def forward(self, hidden_state, height, width):
        if self.with_cls_token:
            (cls_token, hidden_state) = torch.split(hidden_state, [1, height * width], 1)
        (batch_size, hidden_size, num_channels) = hidden_state.shape
        hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width)
        key = self.convolution_projection_key(hidden_state)
        query = self.convolution_projection_query(hidden_state)
        value = self.convolution_projection_value(hidden_state)
        if self.with_cls_token:
            query = torch.cat((cls_token, query), dim=1)
            key = torch.cat((cls_token, key), dim=1)
            value = torch.cat((cls_token, value), dim=1)
        head_dim = self.embed_dim // self.num_heads
        query = self.rearrange_for_multi_head_attention(self.projection_query(query))
        key = self.rearrange_for_multi_head_attention(self.projection_key(key))
        value = self.rearrange_for_multi_head_attention(self.projection_value(value))
        attention_score = torch.einsum('bhlk,bhtk->bhlt', [query, key]) * self.scale
        attention_probs = torch.nn.functional.softmax(attention_score, dim=-1)
        attention_probs = self.dropout(attention_probs)
        context = torch.einsum('bhlt,bhtv->bhlv', [attention_probs, value])
        (_, _, hidden_size, _) = context.shape
        context = context.permute(0, 2, 1, 3).contiguous().view(batch_size, hidden_size, self.num_heads * head_dim)
        return context
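
# Shape note (descriptive comment added for clarity, not in the original file): after
# rearrange_for_multi_head_attention the tensors are (batch, num_heads, tokens, head_dim);
# 'bhlk,bhtk->bhlt' therefore gives attention scores of shape (batch, num_heads, query_tokens,
# key_tokens), and 'bhlt,bhtv->bhlv' maps them back to per-head context vectors before the final
# reshape to (batch, tokens, embed_dim).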

class CvtSelfOutput(nn.Module):

    def __init__(self, embed_dim, drop_rate):
        super().__init__()
        self.dense = nn.Linear(embed_dim, embed_dim)
        self.dropout = nn.Dropout(drop_rate)

    def forward(self, hidden_state, input_tensor):
        hidden_state = self.dense(hidden_state)
        hidden_state = self.dropout(hidden_state)
        return hidden_state

class CvtAttention(nn.Module):

    def __init__(self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, with_cls_token=True):
        super().__init__()
        self.attention = CvtSelfAttention(num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, with_cls_token)
        self.output = CvtSelfOutput(embed_dim, drop_rate)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_state, height, width):
        self_output = self.attention(hidden_state, height, width)
        attention_output = self.output(self_output, hidden_state)
        return attention_output

class CvtIntermediate(nn.Module):

    def __init__(self, embed_dim, mlp_ratio):
        super().__init__()
        self.dense = nn.Linear(embed_dim, int(embed_dim * mlp_ratio))
        self.activation = nn.GELU()

    def forward(self, hidden_state):
        hidden_state = self.dense(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state

class CvtOutput(nn.Module):

    def __init__(self, embed_dim, mlp_ratio, drop_rate):
        super().__init__()
        self.dense = nn.Linear(int(embed_dim * mlp_ratio), embed_dim)
        self.dropout = nn.Dropout(drop_rate)

    def forward(self, hidden_state, input_tensor):
        hidden_state = self.dense(hidden_state)
        hidden_state = self.dropout(hidden_state)
        hidden_state = hidden_state + input_tensor
        return hidden_state

class CvtLayer(nn.Module):

    def __init__(self, num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, mlp_ratio, drop_path_rate, with_cls_token=True):
        super().__init__()
        self.attention = CvtAttention(num_heads, embed_dim, kernel_size, padding_q, padding_kv, stride_q, stride_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, with_cls_token)
        self.intermediate = CvtIntermediate(embed_dim, mlp_ratio)
        self.output = CvtOutput(embed_dim, mlp_ratio, drop_rate)
        self.drop_path = CvtDropPath(drop_prob=drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_before = nn.LayerNorm(embed_dim)
        self.layernorm_after = nn.LayerNorm(embed_dim)

    def forward(self, hidden_state, height, width):
        self_attention_output = self.attention(self.layernorm_before(hidden_state), height, width)
        attention_output = self_attention_output
        attention_output = self.drop_path(attention_output)
        hidden_state = attention_output + hidden_state
        layer_output = self.layernorm_after(hidden_state)
        layer_output = self.intermediate(layer_output)
        layer_output = self.output(layer_output, hidden_state)
        layer_output = self.drop_path(layer_output)
        return layer_output
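
# Illustrative sketch (not part of the original file): the pre-norm residual pattern that
# CvtLayer implements, written out with plain tensors. The helper name and all sizes are
# hypothetical; `attn` and `mlp` stand in for the sub-blocks above, drop path is omitted.
def _cvt_layer_residual_sketch():
    import torch
    import torch.nn as nn

    embed_dim = 64
    norm_before, norm_after = nn.LayerNorm(embed_dim), nn.LayerNorm(embed_dim)
    attn = nn.Identity()  # placeholder for CvtAttention
    mlp = nn.Sequential(nn.Linear(embed_dim, 4 * embed_dim), nn.GELU(), nn.Linear(4 * embed_dim, embed_dim))

    hidden = torch.randn(2, 196, embed_dim)
    hidden = attn(norm_before(hidden)) + hidden  # attention sub-block + residual
    hidden = mlp(norm_after(hidden)) + hidden    # MLP sub-block + residual
    return hidden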

class CvtStage(nn.Module):

    def __init__(self, config, stage):
        super().__init__()
        self.config = config
        self.stage = stage
        if self.config.cls_token[self.stage]:
            self.cls_token = nn.Parameter(torch.randn(1, 1, self.config.embed_dim[-1]))
        self.embedding = CvtEmbeddings(patch_size=config.patch_sizes[self.stage], stride=config.patch_stride[self.stage], num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1], embed_dim=config.embed_dim[self.stage], padding=config.patch_padding[self.stage], dropout_rate=config.drop_rate[self.stage])
        drop_path_rates = [x.item() for x in torch.linspace(0, config.drop_path_rate[self.stage], config.depth[stage])]
        self.layers = nn.Sequential(*[CvtLayer(num_heads=config.num_heads[self.stage], embed_dim=config.embed_dim[self.stage], kernel_size=config.kernel_qkv[self.stage], padding_q=config.padding_q[self.stage], padding_kv=config.padding_kv[self.stage], stride_kv=config.stride_kv[self.stage], stride_q=config.stride_q[self.stage], qkv_projection_method=config.qkv_projection_method[self.stage], qkv_bias=config.qkv_bias[self.stage], attention_drop_rate=config.attention_drop_rate[self.stage], drop_rate=config.drop_rate[self.stage], drop_path_rate=drop_path_rates[self.stage], mlp_ratio=config.mlp_ratio[self.stage], with_cls_token=config.cls_token[self.stage]) for _ in range(config.depth[self.stage])])

    def forward(self, hidden_state):
        cls_token = None
        hidden_state = self.embedding(hidden_state)
        (batch_size, num_channels, height, width) = hidden_state.shape
        hidden_state = hidden_state.view(batch_size, num_channels, height * width).permute(0, 2, 1)
        if self.config.cls_token[self.stage]:
            cls_token = self.cls_token.expand(batch_size, -1, -1)
            hidden_state = torch.cat((cls_token, hidden_state), dim=1)
        for layer in self.layers:
            layer_outputs = layer(hidden_state, height, width)
            hidden_state = layer_outputs
        if self.config.cls_token[self.stage]:
            (cls_token, hidden_state) = torch.split(hidden_state, [1, height * width], 1)
        hidden_state = hidden_state.permute(0, 2, 1).view(batch_size, num_channels, height, width)
        return (hidden_state, cls_token)
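
# Illustrative sketch (not part of the original file): how CvtStage turns a feature map into
# a token sequence for its transformer layers and back. The helper name and all sizes are
# hypothetical; the cls token is only prepended when config.cls_token[stage] is True.
def _cvt_stage_reshape_sketch():
    import torch

    batch_size, num_channels, height, width = 2, 64, 14, 14
    feature_map = torch.randn(batch_size, num_channels, height, width)

    # (B, C, H, W) -> (B, H*W, C): one token per spatial position
    tokens = feature_map.view(batch_size, num_channels, height * width).permute(0, 2, 1)

    # optionally prepend a learnable cls token (last stage only, by default)
    cls_token = torch.randn(1, 1, num_channels).expand(batch_size, -1, -1)
    tokens = torch.cat((cls_token, tokens), dim=1)

    # after the layers: split the cls token off again and restore the spatial layout
    cls_token, tokens = torch.split(tokens, [1, height * width], dim=1)
    restored = tokens.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
    return restored.shape == feature_map.shape  # True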

class CvtEncoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.stages = nn.ModuleList([])
        for stage_idx in range(len(config.depth)):
            self.stages.append(CvtStage(config, stage_idx))

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None
        hidden_state = pixel_values
        cls_token = None
        for (_, stage_module) in enumerate(self.stages):
            (hidden_state, cls_token) = stage_module(hidden_state)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)
        if not return_dict:
            return tuple((v for v in [hidden_state, cls_token, all_hidden_states] if v is not None))
        return BaseModelOutputWithCLSToken(last_hidden_state=hidden_state, cls_token_value=cls_token, hidden_states=all_hidden_states)

class CvtPreTrainedModel(PreTrainedModel):
    config_class = CvtConfig
    base_model_prefix = 'cvt'
    main_input_name = 'pixel_values'
    _no_split_modules = ['CvtLayer']

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, CvtStage):
            if self.config.cls_token[module.stage]:
                module.cls_token.data = nn.init.trunc_normal_(torch.zeros(1, 1, self.config.embed_dim[-1]), mean=0.0, std=self.config.initializer_range)
CVT_START_DOCSTRING = '\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`CvtConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
CVT_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CvtImageProcessor.__call__`]\n            for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'

@add_start_docstrings('The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.', CVT_START_DOCSTRING)
class CvtModel(CvtPreTrainedModel):

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.encoder = CvtEncoder(config)
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        for (layer, heads) in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCLSToken, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithCLSToken]:
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        encoder_outputs = self.encoder(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        return BaseModelOutputWithCLSToken(last_hidden_state=sequence_output, cls_token_value=encoder_outputs.cls_token_value, hidden_states=encoder_outputs.hidden_states)

@add_start_docstrings('\n    Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n    the [CLS] token) e.g. for ImageNet.\n    ', CVT_START_DOCSTRING)
class CvtForImageClassification(CvtPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.cvt = CvtModel(config, add_pooling_layer=False)
        self.layernorm = nn.LayerNorm(config.embed_dim[-1])
        self.classifier = nn.Linear(config.embed_dim[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.post_init()

    @add_start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.cvt(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        cls_token = outputs[1]
        if self.config.cls_token[-1]:
            sequence_output = self.layernorm(cls_token)
        else:
            (batch_size, num_channels, height, width) = sequence_output.shape
            sequence_output = sequence_output.view(batch_size, num_channels, height * width).permute(0, 2, 1)
            sequence_output = self.layernorm(sequence_output)
        sequence_output_mean = sequence_output.mean(dim=1)
        logits = self.classifier(sequence_output_mean)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.config.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.config.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
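
# Illustrative usage sketch (not part of the original file): classifying one image with the
# PyTorch model. "microsoft/cvt-13" is the reference checkpoint; the image URL and the helper
# name are only examples.
def _cvt_classification_usage():
    import requests
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, CvtForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
    model = CvtForImageClassification.from_pretrained("microsoft/cvt-13")

    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_label = logits.argmax(-1).item()
    return model.config.id2label[predicted_label]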

# File: transformers-main/src/transformers/models/cvt/modeling_tf_cvt.py
""""""
from __future__ import annotations
import collections.abc
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...modeling_tf_outputs import TFImageClassifierOutputWithNoAttention
from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs
from ...tf_utils import shape_list, stable_softmax
from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_cvt import CvtConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'CvtConfig'

@dataclass
class TFBaseModelOutputWithCLSToken(ModelOutput):
    last_hidden_state: tf.Tensor = None
    cls_token_value: tf.Tensor = None
    hidden_states: Tuple[tf.Tensor, ...] | None = None

class TFCvtDropPath(keras.layers.Layer):

    def __init__(self, drop_prob: float, **kwargs):
        super().__init__(**kwargs)
        self.drop_prob = drop_prob

    def call(self, x: tf.Tensor, training=None):
        if self.drop_prob == 0.0 or not training:
            return x
        keep_prob = 1 - self.drop_prob
        shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
        random_tensor = keep_prob + tf.random.uniform(shape, 0, 1, dtype=self.compute_dtype)
        random_tensor = tf.floor(random_tensor)
        return x / keep_prob * random_tensor
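
# Illustrative sketch (not part of the original file): stochastic depth ("drop path") as used
# above zeroes out whole residual branches per sample during training and rescales the
# survivors by 1/keep_prob so the expected value is unchanged. Helper name and sizes are
# hypothetical.
def _drop_path_sketch():
    import tensorflow as tf

    x = tf.ones((4, 3))  # 4 samples in the batch
    keep_prob = 0.5
    mask = tf.floor(keep_prob + tf.random.uniform((4, 1), 0, 1))  # 0.0 or 1.0 per sample
    return x / keep_prob * mask  # kept samples are scaled by 1/keep_prob (here 2x)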

class TFCvtEmbeddings(keras.layers.Layer):

    def __init__(self, config: CvtConfig, patch_size: int, num_channels: int, embed_dim: int, stride: int, padding: int, dropout_rate: float, **kwargs):
        super().__init__(**kwargs)
        self.convolution_embeddings = TFCvtConvEmbeddings(config, patch_size=patch_size, num_channels=num_channels, embed_dim=embed_dim, stride=stride, padding=padding, name='convolution_embeddings')
        self.dropout = keras.layers.Dropout(dropout_rate)

    def call(self, pixel_values: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_state = self.convolution_embeddings(pixel_values)
        hidden_state = self.dropout(hidden_state, training=training)
        return hidden_state

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convolution_embeddings', None) is not None:
            with tf.name_scope(self.convolution_embeddings.name):
                self.convolution_embeddings.build(None)

class TFCvtConvEmbeddings(keras.layers.Layer):

    def __init__(self, config: CvtConfig, patch_size: int, num_channels: int, embed_dim: int, stride: int, padding: int, **kwargs):
        super().__init__(**kwargs)
        self.padding = keras.layers.ZeroPadding2D(padding=padding)
        self.patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        self.projection = keras.layers.Conv2D(filters=embed_dim, kernel_size=patch_size, strides=stride, padding='valid', data_format='channels_last', kernel_initializer=get_initializer(config.initializer_range), name='projection')
        self.normalization = keras.layers.LayerNormalization(epsilon=1e-05, name='normalization')
        self.num_channels = num_channels
        self.embed_dim = embed_dim

    def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
        if isinstance(pixel_values, dict):
            pixel_values = pixel_values['pixel_values']
        pixel_values = self.projection(self.padding(pixel_values))
        (batch_size, height, width, num_channels) = shape_list(pixel_values)
        hidden_size = height * width
        pixel_values = tf.reshape(pixel_values, shape=(batch_size, hidden_size, num_channels))
        pixel_values = self.normalization(pixel_values)
        pixel_values = tf.reshape(pixel_values, shape=(batch_size, height, width, num_channels))
        return pixel_values

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'projection', None) is not None:
            with tf.name_scope(self.projection.name):
                self.projection.build([None, None, None, self.num_channels])
        if getattr(self, 'normalization', None) is not None:
            with tf.name_scope(self.normalization.name):
                self.normalization.build([None, None, self.embed_dim])

class TFCvtSelfAttentionConvProjection(keras.layers.Layer):

    def __init__(self, config: CvtConfig, embed_dim: int, kernel_size: int, stride: int, padding: int, **kwargs):
        super().__init__(**kwargs)
        self.padding = keras.layers.ZeroPadding2D(padding=padding)
        self.convolution = keras.layers.Conv2D(filters=embed_dim, kernel_size=kernel_size, kernel_initializer=get_initializer(config.initializer_range), padding='valid', strides=stride, use_bias=False, name='convolution', groups=embed_dim)
        self.normalization = keras.layers.BatchNormalization(epsilon=1e-05, momentum=0.9, name='normalization')
        self.embed_dim = embed_dim

    def call(self, hidden_state: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state, training=training)
        return hidden_state

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convolution', None) is not None:
            with tf.name_scope(self.convolution.name):
                self.convolution.build([None, None, None, self.embed_dim])
        if getattr(self, 'normalization', None) is not None:
            with tf.name_scope(self.normalization.name):
                self.normalization.build([None, None, None, self.embed_dim])

class TFCvtSelfAttentionLinearProjection(keras.layers.Layer):

    def call(self, hidden_state: tf.Tensor) -> tf.Tensor:
        (batch_size, height, width, num_channels) = shape_list(hidden_state)
        hidden_size = height * width
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, num_channels))
        return hidden_state

class TFCvtSelfAttentionProjection(keras.layers.Layer):

    def __init__(self, config: CvtConfig, embed_dim: int, kernel_size: int, stride: int, padding: int, projection_method: str='dw_bn', **kwargs):
        super().__init__(**kwargs)
        if projection_method == 'dw_bn':
            self.convolution_projection = TFCvtSelfAttentionConvProjection(config, embed_dim, kernel_size, stride, padding, name='convolution_projection')
        self.linear_projection = TFCvtSelfAttentionLinearProjection()

    def call(self, hidden_state: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_state = self.convolution_projection(hidden_state, training=training)
        hidden_state = self.linear_projection(hidden_state)
        return hidden_state

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convolution_projection', None) is not None:
            with tf.name_scope(self.convolution_projection.name):
                self.convolution_projection.build(None)

class TFCvtSelfAttention(keras.layers.Layer):

    def __init__(self, config: CvtConfig, num_heads: int, embed_dim: int, kernel_size: int, stride_q: int, stride_kv: int, padding_q: int, padding_kv: int, qkv_projection_method: str, qkv_bias: bool, attention_drop_rate: float, with_cls_token: bool=True, **kwargs):
        super().__init__(**kwargs)
        self.scale = embed_dim ** (-0.5)
        self.with_cls_token = with_cls_token
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.convolution_projection_query = TFCvtSelfAttentionProjection(config, embed_dim, kernel_size, stride_q, padding_q, projection_method='linear' if qkv_projection_method == 'avg' else qkv_projection_method, name='convolution_projection_query')
        self.convolution_projection_key = TFCvtSelfAttentionProjection(config, embed_dim, kernel_size, stride_kv, padding_kv, projection_method=qkv_projection_method, name='convolution_projection_key')
        self.convolution_projection_value = TFCvtSelfAttentionProjection(config, embed_dim, kernel_size, stride_kv, padding_kv, projection_method=qkv_projection_method, name='convolution_projection_value')
        self.projection_query = keras.layers.Dense(units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), use_bias=qkv_bias, bias_initializer='zeros', name='projection_query')
        self.projection_key = keras.layers.Dense(units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), use_bias=qkv_bias, bias_initializer='zeros', name='projection_key')
        self.projection_value = keras.layers.Dense(units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), use_bias=qkv_bias, bias_initializer='zeros', name='projection_value')
        self.dropout = keras.layers.Dropout(attention_drop_rate)

    def rearrange_for_multi_head_attention(self, hidden_state: tf.Tensor) -> tf.Tensor:
        (batch_size, hidden_size, _) = shape_list(hidden_state)
        head_dim = self.embed_dim // self.num_heads
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, self.num_heads, head_dim))
        hidden_state = tf.transpose(hidden_state, perm=(0, 2, 1, 3))
        return hidden_state

    def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool=False) -> tf.Tensor:
        if self.with_cls_token:
            (cls_token, hidden_state) = tf.split(hidden_state, [1, height * width], 1)
        (batch_size, hidden_size, num_channels) = shape_list(hidden_state)
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, height, width, num_channels))
        key = self.convolution_projection_key(hidden_state, training=training)
        query = self.convolution_projection_query(hidden_state, training=training)
        value = self.convolution_projection_value(hidden_state, training=training)
        if self.with_cls_token:
            query = tf.concat((cls_token, query), axis=1)
            key = tf.concat((cls_token, key), axis=1)
            value = tf.concat((cls_token, value), axis=1)
        head_dim = self.embed_dim // self.num_heads
        query = self.rearrange_for_multi_head_attention(self.projection_query(query))
        key = self.rearrange_for_multi_head_attention(self.projection_key(key))
        value = self.rearrange_for_multi_head_attention(self.projection_value(value))
        attention_score = tf.matmul(query, key, transpose_b=True) * self.scale
        attention_probs = stable_softmax(logits=attention_score, axis=-1)
        attention_probs = self.dropout(attention_probs, training=training)
        context = tf.matmul(attention_probs, value)
        (_, _, hidden_size, _) = shape_list(context)
        context = tf.transpose(context, perm=(0, 2, 1, 3))
        context = tf.reshape(context, (batch_size, hidden_size, self.num_heads * head_dim))
        return context

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convolution_projection_query', None) is not None:
            with tf.name_scope(self.convolution_projection_query.name):
                self.convolution_projection_query.build(None)
        if getattr(self, 'convolution_projection_key', None) is not None:
            with tf.name_scope(self.convolution_projection_key.name):
                self.convolution_projection_key.build(None)
        if getattr(self, 'convolution_projection_value', None) is not None:
            with tf.name_scope(self.convolution_projection_value.name):
                self.convolution_projection_value.build(None)
        if getattr(self, 'projection_query', None) is not None:
            with tf.name_scope(self.projection_query.name):
                self.projection_query.build([None, None, self.embed_dim])
        if getattr(self, 'projection_key', None) is not None:
            with tf.name_scope(self.projection_key.name):
                self.projection_key.build([None, None, self.embed_dim])
        if getattr(self, 'projection_value', None) is not None:
            with tf.name_scope(self.projection_value.name):
                self.projection_value.build([None, None, self.embed_dim])
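
# Illustrative sketch (not part of the original file): the reshape/transpose performed by
# rearrange_for_multi_head_attention, splitting the embedding dimension into attention heads.
# The helper name and all sizes are hypothetical.
def _split_heads_sketch():
    import tensorflow as tf

    batch_size, seq_len, embed_dim, num_heads = 2, 197, 384, 6
    head_dim = embed_dim // num_heads
    hidden = tf.random.normal((batch_size, seq_len, embed_dim))
    heads = tf.reshape(hidden, (batch_size, seq_len, num_heads, head_dim))
    heads = tf.transpose(heads, perm=(0, 2, 1, 3))  # -> (batch, num_heads, seq_len, head_dim)
    return heads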

class TFCvtSelfOutput(keras.layers.Layer):

    def __init__(self, config: CvtConfig, embed_dim: int, drop_rate: float, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.dropout = keras.layers.Dropout(drop_rate)
        self.embed_dim = embed_dim

    def call(self, hidden_state: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_state = self.dense(inputs=hidden_state)
        hidden_state = self.dropout(inputs=hidden_state, training=training)
        return hidden_state

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.embed_dim])

class TFCvtAttention(keras.layers.Layer):

    def __init__(self, config: CvtConfig, num_heads: int, embed_dim: int, kernel_size: int, stride_q: int, stride_kv: int, padding_q: int, padding_kv: int, qkv_projection_method: str, qkv_bias: bool, attention_drop_rate: float, drop_rate: float, with_cls_token: bool=True, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFCvtSelfAttention(config, num_heads, embed_dim, kernel_size, stride_q, stride_kv, padding_q, padding_kv, qkv_projection_method, qkv_bias, attention_drop_rate, with_cls_token, name='attention')
        self.dense_output = TFCvtSelfOutput(config, embed_dim, drop_rate, name='output')

    def prune_heads(self, heads):
        raise NotImplementedError

    def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool=False):
        self_output = self.attention(hidden_state, height, width, training=training)
        attention_output = self.dense_output(self_output, training=training)
        return attention_output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'attention', None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, 'dense_output', None) is not None:
            with tf.name_scope(self.dense_output.name):
                self.dense_output.build(None)

class TFCvtIntermediate(keras.layers.Layer):

    def __init__(self, config: CvtConfig, embed_dim: int, mlp_ratio: int, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=int(embed_dim * mlp_ratio), kernel_initializer=get_initializer(config.initializer_range), activation='gelu', name='dense')
        self.embed_dim = embed_dim

    def call(self, hidden_state: tf.Tensor) -> tf.Tensor:
        hidden_state = self.dense(hidden_state)
        return hidden_state

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.embed_dim])

class TFCvtOutput(keras.layers.Layer):

    def __init__(self, config: CvtConfig, embed_dim: int, mlp_ratio: int, drop_rate: int, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.dropout = keras.layers.Dropout(drop_rate)
        self.embed_dim = embed_dim
        self.mlp_ratio = mlp_ratio

    def call(self, hidden_state: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_state = self.dense(inputs=hidden_state)
        hidden_state = self.dropout(inputs=hidden_state, training=training)
        hidden_state = hidden_state + input_tensor
        return hidden_state

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, int(self.embed_dim * self.mlp_ratio)])

class TFCvtLayer(keras.layers.Layer):

    def __init__(self, config: CvtConfig, num_heads: int, embed_dim: int, kernel_size: int, stride_q: int, stride_kv: int, padding_q: int, padding_kv: int, qkv_projection_method: str, qkv_bias: bool, attention_drop_rate: float, drop_rate: float, mlp_ratio: float, drop_path_rate: float, with_cls_token: bool=True, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFCvtAttention(config, num_heads, embed_dim, kernel_size, stride_q, stride_kv, padding_q, padding_kv, qkv_projection_method, qkv_bias, attention_drop_rate, drop_rate, with_cls_token, name='attention')
        self.intermediate = TFCvtIntermediate(config, embed_dim, mlp_ratio, name='intermediate')
        self.dense_output = TFCvtOutput(config, embed_dim, mlp_ratio, drop_rate, name='output')
        self.drop_path = TFCvtDropPath(drop_path_rate, name='drop_path') if drop_path_rate > 0.0 else keras.layers.Activation('linear', name='drop_path')
        self.layernorm_before = keras.layers.LayerNormalization(epsilon=1e-05, name='layernorm_before')
        self.layernorm_after = keras.layers.LayerNormalization(epsilon=1e-05, name='layernorm_after')
        self.embed_dim = embed_dim

    def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool=False) -> tf.Tensor:
        attention_output = self.attention(self.layernorm_before(hidden_state), height, width, training=training)
        attention_output = self.drop_path(attention_output, training=training)
        hidden_state = attention_output + hidden_state
        layer_output = self.layernorm_after(hidden_state)
        layer_output = self.intermediate(layer_output)
        layer_output = self.dense_output(layer_output, hidden_state)
        layer_output = self.drop_path(layer_output, training=training)
        return layer_output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'attention', None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, 'intermediate', None) is not None:
            with tf.name_scope(self.intermediate.name):
                self.intermediate.build(None)
        if getattr(self, 'dense_output', None) is not None:
            with tf.name_scope(self.dense_output.name):
                self.dense_output.build(None)
        if getattr(self, 'drop_path', None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)
        if getattr(self, 'layernorm_before', None) is not None:
            with tf.name_scope(self.layernorm_before.name):
                self.layernorm_before.build([None, None, self.embed_dim])
        if getattr(self, 'layernorm_after', None) is not None:
            with tf.name_scope(self.layernorm_after.name):
                self.layernorm_after.build([None, None, self.embed_dim])

class TFCvtStage(keras.layers.Layer):

    def __init__(self, config: CvtConfig, stage: int, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.stage = stage
        if self.config.cls_token[self.stage]:
            self.cls_token = self.add_weight(shape=(1, 1, self.config.embed_dim[-1]), initializer=get_initializer(self.config.initializer_range), trainable=True, name='cvt.encoder.stages.2.cls_token')
        self.embedding = TFCvtEmbeddings(self.config, patch_size=config.patch_sizes[self.stage], num_channels=config.num_channels if self.stage == 0 else config.embed_dim[self.stage - 1], stride=config.patch_stride[self.stage], embed_dim=config.embed_dim[self.stage], padding=config.patch_padding[self.stage], dropout_rate=config.drop_rate[self.stage], name='embedding')
        drop_path_rates = tf.linspace(0.0, config.drop_path_rate[self.stage], config.depth[stage])
        drop_path_rates = [x.numpy().item() for x in drop_path_rates]
        self.layers = [TFCvtLayer(config, num_heads=config.num_heads[self.stage], embed_dim=config.embed_dim[self.stage], kernel_size=config.kernel_qkv[self.stage], stride_q=config.stride_q[self.stage], stride_kv=config.stride_kv[self.stage], padding_q=config.padding_q[self.stage], padding_kv=config.padding_kv[self.stage], qkv_projection_method=config.qkv_projection_method[self.stage], qkv_bias=config.qkv_bias[self.stage], attention_drop_rate=config.attention_drop_rate[self.stage], drop_rate=config.drop_rate[self.stage], mlp_ratio=config.mlp_ratio[self.stage], drop_path_rate=drop_path_rates[self.stage], with_cls_token=config.cls_token[self.stage], name=f'layers.{j}') for j in range(config.depth[self.stage])]

    def call(self, hidden_state: tf.Tensor, training: bool=False):
        cls_token = None
        hidden_state = self.embedding(hidden_state, training)
        (batch_size, height, width, num_channels) = shape_list(hidden_state)
        hidden_size = height * width
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, num_channels))
        if self.config.cls_token[self.stage]:
            cls_token = tf.repeat(self.cls_token, repeats=batch_size, axis=0)
            hidden_state = tf.concat((cls_token, hidden_state), axis=1)
        for layer in self.layers:
            layer_outputs = layer(hidden_state, height, width, training=training)
            hidden_state = layer_outputs
        if self.config.cls_token[self.stage]:
            (cls_token, hidden_state) = tf.split(hidden_state, [1, height * width], 1)
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, height, width, num_channels))
        return (hidden_state, cls_token)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'embedding', None) is not None:
            with tf.name_scope(self.embedding.name):
                self.embedding.build(None)
        if getattr(self, 'layers', None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)

class TFCvtEncoder(keras.layers.Layer):
    config_class = CvtConfig

    def __init__(self, config: CvtConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.stages = [TFCvtStage(config, stage_idx, name=f'stages.{stage_idx}') for stage_idx in range(len(config.depth))]

    def call(self, pixel_values: TFModelInputType, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
        all_hidden_states = () if output_hidden_states else None
        hidden_state = pixel_values
        hidden_state = tf.transpose(hidden_state, perm=(0, 2, 3, 1))
        cls_token = None
        for (_, stage_module) in enumerate(self.stages):
            (hidden_state, cls_token) = stage_module(hidden_state, training=training)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_state,)
        hidden_state = tf.transpose(hidden_state, perm=(0, 3, 1, 2))
        if output_hidden_states:
            all_hidden_states = tuple([tf.transpose(hs, perm=(0, 3, 1, 2)) for hs in all_hidden_states])
        if not return_dict:
            return tuple((v for v in [hidden_state, cls_token, all_hidden_states] if v is not None))
        return TFBaseModelOutputWithCLSToken(last_hidden_state=hidden_state, cls_token_value=cls_token, hidden_states=all_hidden_states)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'stages', None) is not None:
            for layer in self.stages:
                with tf.name_scope(layer.name):
                    layer.build(None)
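
# Illustrative sketch (not part of the original file): TFCvtEncoder works internally in
# channels-last (NHWC) for Conv2D, while the public API uses channels-first (NCHW) pixel
# values, hence the transposes in `call`. The helper name and sizes are hypothetical.
def _channel_layout_sketch():
    import tensorflow as tf

    pixel_values = tf.random.normal((2, 3, 224, 224))    # NCHW, as passed to the model
    nhwc = tf.transpose(pixel_values, perm=(0, 2, 3, 1))  # (2, 224, 224, 3) for the conv stages
    nchw = tf.transpose(nhwc, perm=(0, 3, 1, 2))          # back to (2, 3, 224, 224) for the outputs
    return nchw.shape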

@keras_serializable
class TFCvtMainLayer(keras.layers.Layer):
    config_class = CvtConfig

    def __init__(self, config: CvtConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.encoder = TFCvtEncoder(config, name='encoder')

    @unpack_inputs
    def call(self, pixel_values: TFModelInputType | None=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        encoder_outputs = self.encoder(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        return TFBaseModelOutputWithCLSToken(last_hidden_state=sequence_output, cls_token_value=encoder_outputs.cls_token_value, hidden_states=encoder_outputs.hidden_states)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'encoder', None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)

class TFCvtPreTrainedModel(TFPreTrainedModel):
    config_class = CvtConfig
    base_model_prefix = 'cvt'
    main_input_name = 'pixel_values'
TFCVT_START_DOCSTRING = '\n\n    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and\n    behavior.\n\n    \n\n    TF 2.0 models accept two formats as inputs:\n\n    - having all inputs as keyword arguments (like PyTorch models), or\n    - having all inputs as a list, tuple or dict in the first positional argument.\n\n    This second option is useful when using the [`keras.Model.fit`] method, which currently requires having all the\n    tensors in the first argument of the model call function: `model(inputs)`.\n\n    \n\n    Args:\n        config ([`CvtConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
TFCVT_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, with each example having the shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CvtImageProcessor.__call__`]\n            for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n            used instead.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n            eager mode, in graph mode the value will always be set to True.\n        training (`bool`, *optional*, defaults to `False`):\n            Whether or not to use the model in training mode (some modules like dropout modules have different\n            behaviors between training and evaluation).\n'

@add_start_docstrings('The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.', TFCVT_START_DOCSTRING)
class TFCvtModel(TFCvtPreTrainedModel):

    def __init__(self, config: CvtConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.cvt = TFCvtMainLayer(config, name='cvt')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(TFCVT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutputWithCLSToken, config_class=_CONFIG_FOR_DOC)
    def call(self, pixel_values: tf.Tensor | None=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithCLSToken, Tuple[tf.Tensor]]:
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        outputs = self.cvt(pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithCLSToken(last_hidden_state=outputs.last_hidden_state, cls_token_value=outputs.cls_token_value, hidden_states=outputs.hidden_states)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'cvt', None) is not None:
            with tf.name_scope(self.cvt.name):
                self.cvt.build(None)

@add_start_docstrings('\n    Cvt Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n    the [CLS] token) e.g. for ImageNet.\n    ', TFCVT_START_DOCSTRING)
class TFCvtForImageClassification(TFCvtPreTrainedModel, TFSequenceClassificationLoss):

    def __init__(self, config: CvtConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.cvt = TFCvtMainLayer(config, name='cvt')
        self.layernorm = keras.layers.LayerNormalization(epsilon=1e-05, name='layernorm')
        self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), use_bias=True, bias_initializer='zeros', name='classifier')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(TFCVT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC)
    def call(self, pixel_values: tf.Tensor | None=None, labels: tf.Tensor | None=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]:
        outputs = self.cvt(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        cls_token = outputs[1]
        if self.config.cls_token[-1]:
            sequence_output = self.layernorm(cls_token)
        else:
            (batch_size, num_channels, height, width) = shape_list(sequence_output)
            sequence_output = tf.reshape(sequence_output, shape=(batch_size, num_channels, height * width))
            sequence_output = tf.transpose(sequence_output, perm=(0, 2, 1))
            sequence_output = self.layernorm(sequence_output)
        sequence_output_mean = tf.reduce_mean(sequence_output, axis=1)
        logits = self.classifier(sequence_output_mean)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'cvt', None) is not None:
            with tf.name_scope(self.cvt.name):
                self.cvt.build(None)
        if getattr(self, 'layernorm', None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, self.config.embed_dim[-1]])
        if getattr(self, 'classifier', None) is not None:
            if hasattr(self.classifier, 'name'):
                with tf.name_scope(self.classifier.name):
                    self.classifier.build([None, None, self.config.embed_dim[-1]])
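
# Illustrative usage sketch (not part of the original file): the same classification example
# with the TF model. "microsoft/cvt-13" is the reference checkpoint; if TF weights are not
# available for a checkpoint, `from_pt=True` can be passed. Helper name and URL are examples.
def _tf_cvt_classification_usage():
    import requests
    import tensorflow as tf
    from PIL import Image
    from transformers import AutoImageProcessor, TFCvtForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    image_processor = AutoImageProcessor.from_pretrained("microsoft/cvt-13")
    model = TFCvtForImageClassification.from_pretrained("microsoft/cvt-13")

    inputs = image_processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits
    predicted_label = int(tf.math.argmax(logits, axis=-1)[0])
    return model.config.id2label[predicted_label]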

# File: transformers-main/src/transformers/models/dac/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_dac': ['DacConfig'], 'feature_extraction_dac': ['DacFeatureExtractor']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_dac'] = ['DacModel', 'DacPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_dac import DacConfig
    from .feature_extraction_dac import DacFeatureExtractor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dac import DacModel, DacPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/dac/configuration_dac.py
""""""
import math
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class DacConfig(PretrainedConfig):
    model_type = 'dac'

    def __init__(self, encoder_hidden_size=64, downsampling_ratios=[2, 4, 8, 8], decoder_hidden_size=1536, n_codebooks=9, codebook_size=1024, codebook_dim=8, quantizer_dropout=0, commitment_loss_weight=0.25, codebook_loss_weight=1.0, sampling_rate=16000, **kwargs):
        self.encoder_hidden_size = encoder_hidden_size
        self.downsampling_ratios = downsampling_ratios
        self.decoder_hidden_size = decoder_hidden_size
        self.upsampling_ratios = downsampling_ratios[::-1]
        self.n_codebooks = n_codebooks
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim
        self.quantizer_dropout = quantizer_dropout
        self.sampling_rate = sampling_rate
        self.hidden_size = encoder_hidden_size * 2 ** len(downsampling_ratios)
        self.hop_length = int(np.prod(downsampling_ratios))
        self.commitment_loss_weight = commitment_loss_weight
        self.codebook_loss_weight = codebook_loss_weight
        super().__init__(**kwargs)

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
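
# Illustrative sketch (not part of the original file): the values DacConfig derives from its
# default arguments above. The helper name is hypothetical.
def _dac_config_derived_values():
    import math
    import numpy as np

    downsampling_ratios = [2, 4, 8, 8]
    encoder_hidden_size, sampling_rate = 64, 16000

    hop_length = int(np.prod(downsampling_ratios))                      # 512 audio samples per frame
    hidden_size = encoder_hidden_size * 2 ** len(downsampling_ratios)   # 64 * 16 = 1024
    frame_rate = math.ceil(sampling_rate / hop_length)                  # ceil(16000 / 512) = 32
    return hop_length, hidden_size, frame_rate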

# File: transformers-main/src/transformers/models/dac/convert_dac_checkpoint.py
import argparse
import fnmatch
import re
import torch
from transformers import DacConfig, DacFeatureExtractor, DacModel, logging
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.dac')

def match_pattern(string, pattern):
    pattern_parts = pattern.split('.')
    string_parts = string.split('.')
    pattern_block_count = string_block_count = 0
    for part in pattern_parts:
        if part.startswith('block'):
            pattern_block_count += 1
    for part in string_parts:
        if part.startswith('block'):
            string_block_count += 1
    return fnmatch.fnmatch(string, pattern) and string_block_count == pattern_block_count
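
# Illustrative sketch (not part of the original file): match_pattern only accepts a name when
# the fnmatch wildcard matches *and* the number of "block" segments agrees, so nested blocks
# at different depths are not confused. The helper name and example keys are hypothetical.
def _match_pattern_examples():
    deep = "encoder.block.1.block.2.block.0.alpha"
    assert match_pattern(deep, "encoder.block.*.block.*.block.0*")  # same block depth -> True
    assert not match_pattern(deep, "encoder.block.*.block.0*")      # fewer block segments -> False
    return True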
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
MAPPING_ENCODER = {'encoder.block.0': ['encoder.conv1'], 'encoder.block.5': ['encoder.snake1'], 'encoder.block.6': ['encoder.conv2'], 'encoder.block.*.block.*.block.0'.replace('*', '\\d+'): ['encoder.block', 'res_unit', 'snake1'], 'encoder.block.*.block.*.block.1'.replace('*', '\\d+'): ['encoder.block', 'res_unit', 'conv1'], 'encoder.block.*.block.*.block.2'.replace('*', '\\d+'): ['encoder.block', 'res_unit', 'snake2'], 'encoder.block.*.block.*.block.3'.replace('*', '\\d+'): ['encoder.block', 'res_unit', 'conv2'], 'encoder.block.*.block.3'.replace('*', '\\d+'): ['encoder.block', 'snake1'], 'encoder.block.*.block.4'.replace('*', '\\d+'): ['encoder.block', 'conv1']}
MAPPING_QUANTIZER = {'quantizer.quantizers.*': ['quantizer.quantizers.*']}
MAPPING_DECODER = {'decoder.model.0': ['decoder.conv1'], 'decoder.model.5': ['decoder.snake1'], 'decoder.model.6': ['decoder.conv2'], 'decoder.model.*.block.0'.replace('*', '\\d+'): ['decoder.block', 'snake1'], 'decoder.model.*.block.1'.replace('*', '\\d+'): ['decoder.block', 'conv_t1'], 'decoder.model.*.block.*.block.0'.replace('*', '\\d+'): ['decoder.block', 'res_unit', 'snake1'], 'decoder.model.*.block.*.block.1'.replace('*', '\\d+'): ['decoder.block', 'res_unit', 'conv1'], 'decoder.model.*.block.*.block.2'.replace('*', '\\d+'): ['decoder.block', 'res_unit', 'snake2'], 'decoder.model.*.block.*.block.3'.replace('*', '\\d+'): ['decoder.block', 'res_unit', 'conv2']}
MAPPING = {**MAPPING_ENCODER, **MAPPING_QUANTIZER, **MAPPING_DECODER}

def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(f"Shape of hf {(key + '.' + weight_type if weight_type is not None else '')} is {hf_shape}, but should be {value.shape} for {full_name}")
    if weight_type == 'weight':
        hf_pointer.weight.data = value
    elif weight_type == 'weight_g':
        hf_pointer.weight_g.data = value
    elif weight_type == 'weight_v':
        hf_pointer.weight_v.data = value
    elif weight_type == 'bias':
        hf_pointer.bias.data = value
    elif weight_type == 'alpha':
        hf_pointer.alpha.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")

def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif '.*.' in key:
            (prefix, suffix) = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False

def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name not in ['dac_16khz', 'dac_24khz', 'dac_44khz']:
        raise ValueError(f'Unsupported model: {model_name}')
    for (name, value) in orig_dict.items():
        is_used = False
        for (key, mapped_key) in MAPPING.items():
            regex = re.compile(key)
            if regex.search(name):
                if len(mapped_key) == 1:
                    if mapped_key[0][0] == 'q':
                        mapped_key = '.'.join(name.split('.')[:-1])
                    else:
                        mapped_key = mapped_key[0]
                elif len(mapped_key) == 3:
                    integers = re.findall('\\b\\d+\\b', name)
                    if mapped_key[0][0] == 'd':
                        mapped_key = '{}.{}.{}{}.{}'.format(mapped_key[0], str(int(integers[0]) - 1), mapped_key[1], str(int(integers[1]) - 1), mapped_key[2])
                    else:
                        mapped_key = '{}.{}.{}{}.{}'.format(mapped_key[0], str(int(integers[0]) - 1), mapped_key[1], str(int(integers[1]) + 1), mapped_key[2])
                elif len(mapped_key) == 2:
                    integers = re.findall('\\b\\d+\\b', name)
                    mapped_key = '{}.{}.{}'.format(mapped_key[0], str(int(integers[0]) - 1), mapped_key[1])
                is_used = True
                weight_type = None
                if 'weight_g' in name:
                    weight_type = 'weight_g'
                elif 'weight_v' in name:
                    weight_type = 'weight_v'
                elif 'bias' in name:
                    weight_type = 'bias'
                elif 'alpha' in name:
                    weight_type = 'alpha'
                elif 'weight' in name:
                    weight_type = 'weight'
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)
    print(list(set(unused_weights)))
    logger.warning(f'Unused weights: {unused_weights}')

@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, sample_rate=16000, repo_id=None):
    model_dict = torch.load(checkpoint_path, 'cpu')
    config = DacConfig()
    metadata = model_dict['metadata']['kwargs']
    config.encoder_hidden_size = metadata['encoder_dim']
    config.downsampling_ratios = metadata['encoder_rates']
    config.codebook_size = metadata['codebook_size']
    config.n_codebooks = metadata['n_codebooks']
    config.codebook_dim = metadata['codebook_dim']
    config.decoder_hidden_size = metadata['decoder_dim']
    config.upsampling_ratios = metadata['decoder_rates']
    config.quantizer_dropout = float(metadata['quantizer_dropout'])
    config.sampling_rate = sample_rate
    model = DacModel(config)
    feature_extractor = DacFeatureExtractor()
    feature_extractor.sampling_rate = sample_rate
    original_checkpoint = model_dict['state_dict']
    model.apply_weight_norm()
    recursively_load_weights(original_checkpoint, model, model_name)
    model.remove_weight_norm()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='dac_44khz', type=str, help="The model to convert. Should be one of 'dac_16khz', 'dac_24khz', 'dac_44khz'.")
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
    parser.add_argument('--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.')
    parser.add_argument('--sample_rate', default=None, type=str, help='Sample rate used by DacFeatureExtractor')
    args = parser.parse_args()
    convert_checkpoint(args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.sample_rate, args.push_to_hub)
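
# Illustrative invocation sketch (not part of the original file); the paths below are
# hypothetical placeholders:
#   python convert_dac_checkpoint.py \
#       --model dac_16khz \
#       --checkpoint_path /path/to/original/weights.pth \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --sample_rate 16000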

# File: transformers-main/src/transformers/models/dac/feature_extraction_dac.py
""""""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)

class DacFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_values', 'n_quantizers']

    def __init__(self, feature_size: int=1, sampling_rate: int=16000, padding_value: float=0.0, hop_length: int=512, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.hop_length = hop_length

    def __call__(self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]]=None, truncation: Optional[bool]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided audio input was sampled with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning('It is strongly recommended to pass the `sampling_rate` argument to this function. Failing to do so can result in silent errors that might be hard to debug.')
        if padding and truncation:
            raise ValueError('Both padding and truncation were set. Make sure you only set one.')
        elif padding is None:
            padding = True
        is_batched = bool(isinstance(raw_audio, (list, tuple)) and isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and (not isinstance(raw_audio, np.ndarray)):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
        for (idx, example) in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}')
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels')
            if self.feature_size == 2:
                raise ValueError("Stereo audio isn't supported for now")
        input_values = BatchFeature({'input_values': raw_audio})
        padded_inputs = self.pad(input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=False, pad_to_multiple_of=self.hop_length)
        if padding:
            padded_inputs.input_values = padded_inputs.input_values[:, np.newaxis, :]
        input_values = []
        for example in padded_inputs.pop('input_values'):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs['input_values'] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
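
# Illustrative usage sketch: the helper name below and the dummy waveform are
# assumptions for demonstration only. It shows the intended call pattern, assuming a
# 16 kHz mono clip; the extractor pads the audio to a multiple of `hop_length` and
# returns `input_values` shaped `(batch_size, 1, time_steps)` for the DAC model.
def _example_dac_feature_extraction():
    extractor = DacFeatureExtractor(sampling_rate=16000, hop_length=512)
    dummy_audio = np.zeros(16000, dtype=np.float32)  # one second of silence
    # Passing `sampling_rate` avoids the warning above and catches rate mismatches.
    inputs = extractor(dummy_audio, sampling_rate=16000, return_tensors="pt")
    return inputs["input_values"]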

# File: transformers-main/src/transformers/models/dac/modeling_dac.py
""""""
import math
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from .configuration_dac import DacConfig
_CONFIG_FOR_DOC = 'DacConfig'

@dataclass
class DacOutput(ModelOutput):
    loss: torch.FloatTensor = None
    audio_values: torch.FloatTensor = None
    quantized_representation: torch.FloatTensor = None
    audio_codes: torch.LongTensor = None
    projected_latents: torch.FloatTensor = None

@dataclass
class DacEncoderOutput(ModelOutput):
    loss: torch.FloatTensor = None
    quantized_representation: torch.FloatTensor = None
    audio_codes: torch.LongTensor = None
    projected_latents: torch.FloatTensor = None

@dataclass
class DacDecoderOutput(ModelOutput):
    audio_values: torch.FloatTensor = None

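# Snake activation, x + sin^2(alpha * x) / alpha, applied channel-wise; `alpha` is a
# learnable per-channel parameter and the 1e-9 term guards against division by zero.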
class Snake1d(nn.Module):

    def __init__(self, hidden_dim):
        super().__init__()
        self.alpha = nn.Parameter(torch.ones(1, hidden_dim, 1))

    def forward(self, hidden_states):
        shape = hidden_states.shape
        hidden_states = hidden_states.reshape(shape[0], shape[1], -1)
        hidden_states = hidden_states + (self.alpha + 1e-09).reciprocal() * torch.sin(self.alpha * hidden_states).pow(2)
        hidden_states = hidden_states.reshape(shape)
        return hidden_states

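# Single vector-quantization stage: latents are projected down to the codebook
# dimension, matched to their nearest (L2-normalised) codebook entry, and projected
# back up. The straight-through trick in `forward` keeps gradients flowing to the
# encoder, while the commitment and codebook MSE losses train both sides of the
# quantizer.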
class DacVectorQuantize(nn.Module):

    def __init__(self, config: DacConfig):
        super().__init__()
        self.in_proj = nn.Conv1d(config.hidden_size, config.codebook_dim, kernel_size=1)
        self.out_proj = nn.Conv1d(config.codebook_dim, config.hidden_size, kernel_size=1)
        self.codebook = nn.Embedding(config.codebook_size, config.codebook_dim)

    def forward(self, hidden_state):
        projected_latents = self.in_proj(hidden_state)
        (quantized_representation, audio_codes) = self.decode_latents(projected_latents)
        commitment_loss = F.mse_loss(projected_latents, quantized_representation.detach(), reduction='mean')
        codebook_loss = F.mse_loss(quantized_representation, projected_latents.detach(), reduction='mean')
        quantized_representation = projected_latents + (quantized_representation - projected_latents).detach()
        quantized_representation = self.out_proj(quantized_representation)
        return (quantized_representation, commitment_loss, codebook_loss, audio_codes, projected_latents)

    def decode_latents(self, hidden_states):
        (batch_size, hidden_dim, sequence_length) = hidden_states.shape
        encodings = hidden_states.permute(0, 2, 1).reshape(batch_size * sequence_length, hidden_dim)
        codebook = self.codebook.weight
        encodings = F.normalize(encodings)
        codebook = F.normalize(codebook)
        l2_norm = encodings.pow(2).sum(1, keepdim=True)
        dist = -(l2_norm - 2 * encodings @ codebook.t()) + codebook.pow(2).sum(1, keepdim=True).t()
        indices = dist.max(1)[1]
        indices = indices.reshape(hidden_states.size(0), -1)
        quantized_representation = self.codebook(indices).transpose(1, 2)
        return (quantized_representation, indices)

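# Residual unit built from a dilated 7-tap convolution followed by a 1x1 convolution,
# each preceded by a Snake activation. If the convolutions shorten the sequence, the
# skip connection is center-cropped so both branches can be summed.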
class DacResidualUnit(nn.Module):

    def __init__(self, dimension: int=16, dilation: int=1):
        super().__init__()
        pad = (7 - 1) * dilation // 2
        self.snake1 = Snake1d(dimension)
        self.conv1 = nn.Conv1d(dimension, dimension, kernel_size=7, dilation=dilation, padding=pad)
        self.snake2 = Snake1d(dimension)
        self.conv2 = nn.Conv1d(dimension, dimension, kernel_size=1)

    def forward(self, hidden_state):
        output_tensor = hidden_state
        output_tensor = self.conv1(self.snake1(output_tensor))
        output_tensor = self.conv2(self.snake2(output_tensor))
        padding = (hidden_state.shape[-1] - output_tensor.shape[-1]) // 2
        if padding > 0:
            hidden_state = hidden_state[..., padding:-padding]
        output_tensor = hidden_state + output_tensor
        return output_tensor

class DacEncoderBlock(nn.Module):

    def __init__(self, config: DacConfig, stride: int=1, stride_index: int=1):
        super().__init__()
        dimension = config.encoder_hidden_size * 2 ** stride_index
        self.res_unit1 = DacResidualUnit(dimension // 2, dilation=1)
        self.res_unit2 = DacResidualUnit(dimension // 2, dilation=3)
        self.res_unit3 = DacResidualUnit(dimension // 2, dilation=9)
        self.snake1 = Snake1d(dimension // 2)
        self.conv1 = nn.Conv1d(dimension // 2, dimension, kernel_size=2 * stride, stride=stride, padding=math.ceil(stride / 2))

    def forward(self, hidden_state):
        hidden_state = self.res_unit1(hidden_state)
        hidden_state = self.res_unit2(hidden_state)
        hidden_state = self.snake1(self.res_unit3(hidden_state))
        hidden_state = self.conv1(hidden_state)
        return hidden_state

class DacDecoderBlock(nn.Module):

    def __init__(self, config: DacConfig, stride: int=1, stride_index: int=1):
        super().__init__()
        input_dim = config.decoder_hidden_size // 2 ** stride_index
        output_dim = config.decoder_hidden_size // 2 ** (stride_index + 1)
        self.snake1 = Snake1d(input_dim)
        self.conv_t1 = nn.ConvTranspose1d(input_dim, output_dim, kernel_size=2 * stride, stride=stride, padding=math.ceil(stride / 2))
        self.res_unit1 = DacResidualUnit(output_dim, dilation=1)
        self.res_unit2 = DacResidualUnit(output_dim, dilation=3)
        self.res_unit3 = DacResidualUnit(output_dim, dilation=9)

    def forward(self, hidden_state):
        hidden_state = self.snake1(hidden_state)
        hidden_state = self.conv_t1(hidden_state)
        hidden_state = self.res_unit1(hidden_state)
        hidden_state = self.res_unit2(hidden_state)
        hidden_state = self.res_unit3(hidden_state)
        return hidden_state

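# Residual vector quantization: each quantizer encodes the residual left over by the
# previous ones, and their outputs are summed. During training, quantizer dropout
# randomly limits how many codebooks are active for part of the batch, so the model
# also learns to operate with fewer codebooks (i.e. at lower bitrates).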
class DacResidualVectorQuantize(nn.Module):

    def __init__(self, config: DacConfig):
        super().__init__()
        n_codebooks = config.n_codebooks
        quantizer_dropout = config.quantizer_dropout
        self.n_codebooks = n_codebooks
        self.quantizers = nn.ModuleList([DacVectorQuantize(config) for i in range(config.n_codebooks)])
        self.quantizer_dropout = quantizer_dropout

    def forward(self, hidden_state, n_quantizers: int=None):
        quantized_representation = 0
        residual = hidden_state
        commitment_loss = 0
        codebook_loss = 0
        audio_codes = []
        projected_latents = []
        n_quantizers = n_quantizers if n_quantizers is not None else self.n_codebooks
        if self.training:
            n_quantizers = torch.ones((hidden_state.shape[0],)) * self.n_codebooks + 1
            dropout = torch.randint(1, self.n_codebooks + 1, (hidden_state.shape[0],))
            n_dropout = int(hidden_state.shape[0] * self.quantizer_dropout)
            n_quantizers[:n_dropout] = dropout[:n_dropout]
            n_quantizers = n_quantizers.to(hidden_state.device)
        for (i, quantizer) in enumerate(self.quantizers):
            if self.training is False and i >= n_quantizers:
                break
            (quantized_representation_i, commitment_loss_i, codebook_loss_i, indices_i, projected_latents_i) = quantizer(residual)
            mask = torch.full((hidden_state.shape[0],), fill_value=i, device=hidden_state.device) < n_quantizers
            quantized_representation = quantized_representation + quantized_representation_i * mask[:, None, None]
            residual = residual - quantized_representation_i
            commitment_loss += commitment_loss_i * mask
            codebook_loss += codebook_loss_i * mask
            audio_codes.append(indices_i)
            projected_latents.append(projected_latents_i)
        audio_codes = torch.stack(audio_codes, dim=1)
        projected_latents = torch.cat(projected_latents, dim=1)
        return (quantized_representation, audio_codes, projected_latents, commitment_loss, codebook_loss)

    def from_codes(self, audio_codes: torch.Tensor):
        quantized_representation = 0.0
        projected_latents = []
        n_codebooks = audio_codes.shape[1]
        for i in range(n_codebooks):
            projected_latents_i = self.quantizers[i].codebook(audio_codes[:, i, :]).transpose(1, 2)
            projected_latents.append(projected_latents_i)
            quantized_representation += self.quantizers[i].out_proj(projected_latents_i)
        return (quantized_representation, torch.cat(projected_latents, dim=1), audio_codes)

    def from_latents(self, latents: torch.Tensor):
        quantized_representation = 0
        quantized_latents = []
        codes = []
        codebook_dims_tensor = torch.tensor([0] + [q.codebook_dim for q in self.quantizers])
        dims = torch.cumsum(codebook_dims_tensor, dim=0)
        n_codebooks = np.where(dims <= latents.shape[1])[0].max(axis=0, keepdims=True)[0]
        for i in range(n_codebooks):
            (hidden_dim_j, hidden_dim_k) = (dims[i], dims[i + 1])
            (quantized_latents_i, codes_i) = self.quantizers[i].decode_latents(latents[:, hidden_dim_j:hidden_dim_k, :])
            quantized_latents.append(quantized_latents_i)
            codes.append(codes_i)
            quantized_representation_i = self.quantizers[i].out_proj(quantized_latents_i)
            quantized_representation = quantized_representation + quantized_representation_i
        return (quantized_representation, torch.cat(quantized_latents, dim=1))

class DacDecoder(nn.Module):

    def __init__(self, config: DacConfig):
        super().__init__()
        input_channel = config.hidden_size
        channels = config.decoder_hidden_size
        strides = config.upsampling_ratios
        self.conv1 = nn.Conv1d(input_channel, channels, kernel_size=7, padding=3)
        block = []
        for (stride_index, stride) in enumerate(strides):
            block += [DacDecoderBlock(config, stride, stride_index)]
        self.block = nn.ModuleList(block)
        output_dim = config.decoder_hidden_size // 2 ** (stride_index + 1)
        self.snake1 = Snake1d(output_dim)
        self.conv2 = nn.Conv1d(output_dim, 1, kernel_size=7, padding=3)
        self.tanh = nn.Tanh()

    def forward(self, hidden_state):
        hidden_state = self.conv1(hidden_state)
        for layer in self.block:
            hidden_state = layer(hidden_state)
        hidden_state = self.snake1(hidden_state)
        hidden_state = self.conv2(hidden_state)
        hidden_state = self.tanh(hidden_state)
        return hidden_state

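# The encoder stacks strided convolutional blocks (one per entry in
# `config.downsampling_ratios`), doubling the channel count at every block, and ends
# with a projection to `config.hidden_size` latent channels.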
class DacEncoder(nn.Module):

    def __init__(self, config: DacConfig):
        super().__init__()
        strides = config.downsampling_ratios
        self.conv1 = nn.Conv1d(1, config.encoder_hidden_size, kernel_size=7, padding=3)
        self.block = []
        for (stride_index, stride) in enumerate(strides):
            stride_index = stride_index + 1
            self.block += [DacEncoderBlock(config, stride=stride, stride_index=stride_index)]
        self.block = nn.ModuleList(self.block)
        d_model = config.encoder_hidden_size * 2 ** stride_index
        self.snake1 = Snake1d(d_model)
        self.conv2 = nn.Conv1d(d_model, config.hidden_size, kernel_size=3, padding=1)

    def forward(self, hidden_state):
        hidden_state = self.conv1(hidden_state)
        for module in self.block:
            hidden_state = module(hidden_state)
        hidden_state = self.snake1(hidden_state)
        hidden_state = self.conv2(hidden_state)
        return hidden_state

class DacPreTrainedModel(PreTrainedModel):
    config_class = DacConfig
    base_model_prefix = 'dac'
    main_input_name = 'input_values'

    def _init_weights(self, module):
        if isinstance(module, nn.Conv1d):
            nn.init.trunc_normal_(module.weight, std=0.02)
            nn.init.constant_(module.bias, 0)

    def apply_weight_norm(self):
        for layer in self.quantizer.quantizers:
            nn.utils.weight_norm(layer.in_proj)
            nn.utils.weight_norm(layer.out_proj)
        nn.utils.weight_norm(self.encoder.conv1)
        nn.utils.weight_norm(self.encoder.conv2)
        for layer in self.encoder.block:
            nn.utils.weight_norm(layer.conv1)
            nn.utils.weight_norm(layer.res_unit1.conv1)
            nn.utils.weight_norm(layer.res_unit1.conv2)
            nn.utils.weight_norm(layer.res_unit2.conv1)
            nn.utils.weight_norm(layer.res_unit2.conv2)
            nn.utils.weight_norm(layer.res_unit3.conv1)
            nn.utils.weight_norm(layer.res_unit3.conv2)
        nn.utils.weight_norm(self.decoder.conv1)
        nn.utils.weight_norm(self.decoder.conv2)
        for layer in self.decoder.block:
            nn.utils.weight_norm(layer.conv_t1)
            nn.utils.weight_norm(layer.res_unit1.conv1)
            nn.utils.weight_norm(layer.res_unit1.conv2)
            nn.utils.weight_norm(layer.res_unit2.conv1)
            nn.utils.weight_norm(layer.res_unit2.conv2)
            nn.utils.weight_norm(layer.res_unit3.conv1)
            nn.utils.weight_norm(layer.res_unit3.conv2)

    def remove_weight_norm(self):
        for layer in self.quantizer.quantizers:
            nn.utils.remove_weight_norm(layer.in_proj)
            nn.utils.remove_weight_norm(layer.out_proj)
        nn.utils.remove_weight_norm(self.encoder.conv1)
        nn.utils.remove_weight_norm(self.encoder.conv2)
        for layer in self.encoder.block:
            nn.utils.remove_weight_norm(layer.conv1)
            nn.utils.remove_weight_norm(layer.res_unit1.conv1)
            nn.utils.remove_weight_norm(layer.res_unit1.conv2)
            nn.utils.remove_weight_norm(layer.res_unit2.conv1)
            nn.utils.remove_weight_norm(layer.res_unit2.conv2)
            nn.utils.remove_weight_norm(layer.res_unit3.conv1)
            nn.utils.remove_weight_norm(layer.res_unit3.conv2)
        nn.utils.remove_weight_norm(self.decoder.conv1)
        nn.utils.remove_weight_norm(self.decoder.conv2)
        for layer in self.decoder.block:
            nn.utils.remove_weight_norm(layer.conv_t1)
            nn.utils.remove_weight_norm(layer.res_unit1.conv1)
            nn.utils.remove_weight_norm(layer.res_unit1.conv2)
            nn.utils.remove_weight_norm(layer.res_unit2.conv1)
            nn.utils.remove_weight_norm(layer.res_unit2.conv2)
            nn.utils.remove_weight_norm(layer.res_unit3.conv1)
            nn.utils.remove_weight_norm(layer.res_unit3.conv2)
DAC_START_DOCSTRING = '\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`DacConfig`]):\n            Model configuration class with all the parameters of the model. Initializing with a config file does not\n            load the weights associated with the model, only the configuration. Check out the\n            [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
DAC_INPUTS_DOCSTRING = '\n    Args:\n        input_values (`torch.Tensor` of shape `(batch_size, 1, time_steps)`):\n            Audio data to encode.\n        n_quantizers (`int`, *optional*):\n            Number of quantizers to use. If `None`, all quantizers are used. Default is `None`.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'

@add_start_docstrings('The DAC (Descript Audio Codec) model.', DAC_START_DOCSTRING)
class DacModel(DacPreTrainedModel):

    def __init__(self, config: DacConfig):
        super().__init__(config)
        self.config = config
        self.encoder = DacEncoder(config)
        self.decoder = DacDecoder(config)
        self.quantizer = DacResidualVectorQuantize(config)
        self.bits_per_codebook = int(math.log2(self.config.codebook_size))
        if 2 ** self.bits_per_codebook != self.config.codebook_size:
            raise ValueError('The codebook_size must be a power of 2.')
        self.post_init()

    @replace_return_docstrings(output_type=DacEncoderOutput, config_class=_CONFIG_FOR_DOC)
    def encode(self, input_values: torch.Tensor, n_quantizers: int=None, return_dict: Optional[bool]=None):
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        quantized_representation = self.encoder(input_values)
        (quantized_representation, audio_codes, projected_latents, commitment_loss, codebook_loss) = self.quantizer(quantized_representation, n_quantizers)
        loss = self.config.commitment_loss_weight * commitment_loss + self.config.codebook_loss_weight * codebook_loss
        if not return_dict:
            return (loss, quantized_representation, audio_codes, projected_latents)
        return DacEncoderOutput(loss, quantized_representation, audio_codes, projected_latents)

    @replace_return_docstrings(output_type=DacDecoderOutput, config_class=_CONFIG_FOR_DOC)
    def decode(self, quantized_representation: Optional[torch.Tensor], audio_codes: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None):
        if quantized_representation is None and audio_codes is None:
            raise ValueError('Either `quantized_representation` or `audio_codes` must be provided.')
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        if audio_codes is not None:
            quantized_representation = self.quantizer.from_codes(audio_codes)[0]
        audio_values = self.decoder(quantized_representation).squeeze(1)
        if not return_dict:
            return (audio_values,)
        return DacDecoderOutput(audio_values)

    @add_start_docstrings_to_model_forward(DAC_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=DacOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_values: torch.Tensor, n_quantizers: int=None, return_dict: Optional[bool]=None):
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        length = input_values.shape[-1]
        (loss, quantized_representation, audio_codes, projected_latents) = self.encode(input_values, n_quantizers, return_dict=False)
        audio_values = self.decode(quantized_representation, return_dict=False)[0][..., :length]
        if not return_dict:
            return (loss, audio_values, quantized_representation, audio_codes, projected_latents)
        return DacOutput(loss, audio_values, quantized_representation, audio_codes, projected_latents)
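
# Illustrative usage sketch: the checkpoint name "descript/dac_16khz", the helper name
# below, and the dummy waveform are assumptions for demonstration only. It shows the
# encode/decode flow: `encode` returns discrete `audio_codes` plus the quantized
# latents, and `decode` reconstructs the waveform from those latents.
def _example_dac_roundtrip():
    import numpy as np

    from transformers import DacFeatureExtractor, DacModel

    model = DacModel.from_pretrained("descript/dac_16khz")
    feature_extractor = DacFeatureExtractor.from_pretrained("descript/dac_16khz")

    audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = feature_extractor(audio, sampling_rate=16000, return_tensors="pt")

    encoder_outputs = model.encode(inputs["input_values"])
    decoded = model.decode(encoder_outputs.quantized_representation)
    return encoder_outputs.audio_codes, decoded.audio_values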

# File: transformers-main/src/transformers/models/data2vec/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'configuration_data2vec_audio': ['Data2VecAudioConfig'], 'configuration_data2vec_text': ['Data2VecTextConfig', 'Data2VecTextOnnxConfig'], 'configuration_data2vec_vision': ['Data2VecVisionConfig', 'Data2VecVisionOnnxConfig']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_data2vec_audio'] = ['Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel']
    _import_structure['modeling_data2vec_text'] = ['Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel']
    _import_structure['modeling_data2vec_vision'] = ['Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel']
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = ['TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_data2vec_audio import Data2VecAudioConfig
    from .configuration_data2vec_text import Data2VecTextConfig, Data2VecTextOnnxConfig
    from .configuration_data2vec_vision import Data2VecVisionConfig, Data2VecVisionOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import Data2VecAudioForAudioFrameClassification, Data2VecAudioForCTC, Data2VecAudioForSequenceClassification, Data2VecAudioForXVector, Data2VecAudioModel, Data2VecAudioPreTrainedModel
        from .modeling_data2vec_text import Data2VecTextForCausalLM, Data2VecTextForMaskedLM, Data2VecTextForMultipleChoice, Data2VecTextForQuestionAnswering, Data2VecTextForSequenceClassification, Data2VecTextForTokenClassification, Data2VecTextModel, Data2VecTextPreTrainedModel
        from .modeling_data2vec_vision import Data2VecVisionForImageClassification, Data2VecVisionForMaskedImageModeling, Data2VecVisionForSemanticSegmentation, Data2VecVisionModel, Data2VecVisionPreTrainedModel
    if is_tf_available():
        from .modeling_tf_data2vec_vision import TFData2VecVisionForImageClassification, TFData2VecVisionForSemanticSegmentation, TFData2VecVisionModel, TFData2VecVisionPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/data2vec/configuration_data2vec_audio.py
""""""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class Data2VecAudioConfig(PretrainedConfig):
    model_type = 'data2vec-audio'

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-05, feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16, conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction='sum', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if len(self.conv_stride) != self.num_feat_extract_layers or len(self.conv_kernel) != self.num_feat_extract_layers or len(self.conv_dim) != self.num_feat_extract_layers:
            raise ValueError(f'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.classifier_proj_size = classifier_proj_size
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

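    # Total downsampling factor of the convolutional feature encoder: how many raw
    # waveform samples map to a single output frame (product of all conv strides).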
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)

# File: transformers-main/src/transformers/models/data2vec/configuration_data2vec_text.py
""""""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class Data2VecTextConfig(PretrainedConfig):
    model_type = 'data2vec-text'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout

class Data2VecTextOnnxConfig(OnnxConfig):

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

# File: transformers-main/src/transformers/models/data2vec/configuration_data2vec_vision.py
""""""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class Data2VecVisionConfig(PretrainedConfig):
    model_type = 'data2vec-vision'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index

class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])

    @property
    def atol_for_validation(self) -> float:
        return 0.0001

# File: transformers-main/src/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py
""""""
import argparse
import os
from functools import reduce
import fairseq
import torch
from datasets import load_dataset
from transformers import Wav2Vec2Processor, logging
from transformers.models.data2vec.configuration_data2vec_audio import Data2VecAudioConfig
from transformers.models.data2vec.data2vec_audio import Data2VecAudioModel as Dummy
from transformers.models.data2vec.modeling_data2vec_audio import Data2VecAudioForCTC, Data2VecAudioModel
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {'post_extract_proj': 'feature_projection.projection', 'models.0.layer_norm': 'feature_projection.layer_norm', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed'}
TOP_LEVEL_KEYS = ['lm_head']

def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(f"Shape of hf {(key + '.' + weight_type if weight_type is not None else '')} is {hf_shape}, but should be {value.shape} for {full_name}")
    if weight_type == 'weight':
        hf_pointer.weight.data = value
    elif weight_type == 'weight_g':
        hf_pointer.weight_g.data = value
    elif weight_type == 'weight_v':
        hf_pointer.weight_v.data = value
    elif weight_type == 'bias':
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{(key + '.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")

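# Walks the fairseq state dict and copies every tensor into the HF model, translating
# parameter names via MAPPING ('*' stands for the encoder layer index); anything that
# cannot be matched is collected and reported as unused.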
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    if not is_headless:
        feature_extractor = hf_model.data2vec_audio.feature_extractor
        pos_conv_embedding = hf_model.data2vec_audio.encoder.pos_conv_embed
    else:
        feature_extractor = hf_model.feature_extractor
        pos_conv_embedding = hf_model.encoder.pos_conv_embed
    for (name, value) in fairseq_dict.items():
        is_used = False
        if 'conv_layers' in name:
            load_conv_layer(name, value, feature_extractor, unused_weights)
            is_used = True
        elif 'pos_conv' in name:
            load_pos_conv_layer(name, value, pos_conv_embedding, unused_weights)
            is_used = True
        else:
            for (key, mapped_key) in MAPPING.items():
                if not is_headless:
                    mapped_key = 'data2vec_audio.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if '*' in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if 'weight_g' in name:
                        weight_type = 'weight_g'
                    elif 'weight_v' in name:
                        weight_type = 'weight_v'
                    elif 'bias' in name:
                        weight_type = 'bias'
                    elif 'weight' in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')

def access_by_string(module, path):
    names = path.split('.')
    return reduce(getattr, names, module)

def set_weights(full_name, module, fsq_value, hf_weight_path):
    hf_weight = access_by_string(module, hf_weight_path)
    hf_value = hf_weight.data
    if fsq_value.shape != hf_value.shape:
        raise ValueError(f'{full_name} has size {fsq_value.shape}, but {hf_value.shape} was found.')
    hf_weight.data = fsq_value
    logger.info(f'{full_name} was correctly initialized from {hf_weight_path}.')

def load_conv_layer(full_name, value, feature_extractor, unused_weights):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    weight_type = name.split('.')[-1]
    if type_id == 0:
        layer_type = 'conv'
    elif type_id == 2:
        layer_type = 'layer_norm'
    else:
        unused_weights.append(full_name)
        return
    set_weights(full_name, feature_extractor, value, f'conv_layers.{layer_id}.{layer_type}.{weight_type}')

def load_pos_conv_layer(full_name, value, pos_conv_embeddings, unused_weights):
    name = full_name.split('pos_conv.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    weight_type = name.split('.')[-1]
    if type_id != 0:
        unused_weights.append(full_name)
        return
    else:
        layer_type = 'conv'
    set_weights(full_name, pos_conv_embeddings, value, f'layers.{layer_id}.{layer_type}.{weight_type}')

@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = Data2VecAudioConfig.from_pretrained(config_path)
    else:
        config = Data2VecAudioConfig()
    if not is_finetuned:
        hf_wav2vec = Data2VecAudioModel(config)
        data2vec_checkpoint_dir = os.path.dirname(checkpoint_path)
        state_dict = torch.load(checkpoint_path)
        state_dict['model']['final_proj.weight'] = state_dict['model'].pop('final_proj.0.weight')
        state_dict['model']['final_proj.bias'] = state_dict['model'].pop('final_proj.0.bias')
        converted_ckpt = os.path.join(data2vec_checkpoint_dir, 'converted.pt')
        torch.save(state_dict, converted_ckpt)
    else:
        hf_wav2vec = Data2VecAudioForCTC(config)
        converted_ckpt = checkpoint_path

    def load_data2vec(path):
        (model, _, _) = fairseq.checkpoint_utils.load_model_ensemble_and_task([path])
        return model[0].eval()
    model = load_data2vec(converted_ckpt)
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    processor = Wav2Vec2Processor.from_pretrained('facebook/wav2vec2-large-lv60')
    ds = load_dataset('patrickvonplaten/librispeech_asr_dummy', 'clean', split='validation', trust_remote_code=True)
    input_audio = [x['array'] for x in ds[:4]['audio']]
    inputs = processor(input_audio, return_tensors='pt', padding=True)
    input_values = inputs.input_values
    attention_mask = inputs.attention_mask
    hf_wav2vec.eval()
    model.eval()
    if is_finetuned:
        their_output = model(source=input_values, padding_mask=1 - attention_mask, mask=False, features_only=True)['encoder_out'].transpose(0, 1)
        our_output = hf_wav2vec(input_values, attention_mask=attention_mask)['logits']
        pred_ids = torch.argmax(our_output, dim=-1)
        output_string = processor.batch_decode(pred_ids)
        print(f"Expected Output: {ds[:4]['text']}, Pred: {output_string}")
    else:
        their_output = model(source=input_values, padding_mask=1 - attention_mask, mask=False, features_only=True)['layer_results'][-1][0].transpose(0, 1)
        our_output = hf_wav2vec(input_values, attention_mask=attention_mask)['last_hidden_state']
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f'max_absolute_diff = {max_absolute_diff}')
    success = torch.allclose(our_output, their_output, atol=0.001)
    print('Do both models output the same tensors?', '🔥' if success else '💩')
    if not success:
        raise Exception('Something went wRoNg')
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    if is_finetuned:
        processor.save_pretrained(pytorch_dump_folder_path)
    else:
        processor.feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument('--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned)

# File: transformers-main/src/transformers/models/data2vec/convert_data2vec_text_original_pytorch_checkpoint_to_pytorch.py
""""""
import argparse
import os
import pathlib
import fairseq
import torch
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import Data2VecTextConfig, Data2VecTextForMaskedLM, Data2VecTextForSequenceClassification, Data2VecTextModel
from transformers.models.bert.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
    raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'

def convert_data2vec_checkpoint_to_pytorch(data2vec_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    (data2vec_checkpoint_dir, data2vec_checkpoint_file_name) = os.path.split(data2vec_checkpoint_path)
    data2vec = Data2VecTextModel.from_pretrained(data2vec_checkpoint_dir, checkpoint_file=data2vec_checkpoint_file_name)
    data2vec.eval()
    data2vec_model = data2vec.models[0]
    data2vec_sent_encoder = data2vec_model.encoder.sentence_encoder
    config = Data2VecTextConfig(vocab_size=data2vec_sent_encoder.embed_tokens.num_embeddings, hidden_size=data2vec_model.args.encoder_embed_dim, num_hidden_layers=data2vec_model.args.encoder_layers, num_attention_heads=data2vec_model.args.encoder_attention_heads, intermediate_size=data2vec_model.args.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05)
    if classification_head:
        config.num_labels = data2vec.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our BERT config:', config)
    model = Data2VecTextForSequenceClassification(config) if classification_head else Data2VecTextForMaskedLM(config)
    model.eval()
    model.data2vec_text.embeddings.word_embeddings.weight = data2vec_sent_encoder.embed_tokens.weight
    model.data2vec_text.embeddings.position_embeddings.weight = data2vec_sent_encoder.embed_positions.weight
    model.data2vec_text.embeddings.token_type_embeddings.weight.data = torch.zeros_like(model.data2vec_text.embeddings.token_type_embeddings.weight)
    model.data2vec_text.embeddings.LayerNorm.weight = data2vec_sent_encoder.layernorm_embedding.weight
    model.data2vec_text.embeddings.LayerNorm.bias = data2vec_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        layer: BertLayer = model.data2vec_text.encoder.layer[i]
        data2vec_layer: TransformerSentenceEncoderLayer = data2vec_sent_encoder.layers[i]
        self_attn: BertSelfAttention = layer.attention.self
        assert data2vec_layer.self_attn.k_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)), f'Shape for data2vec_layer.self_attn.k_proj.weight.data should be {torch.Size((config.hidden_size, config.hidden_size))}'
        assert data2vec_layer.self_attn.q_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)), f'Shape for data2vec_layer.self_attn.q_proj.weight.data should be {torch.Size((config.hidden_size, config.hidden_size))}'
        assert data2vec_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)), f'Shape for data2vec_layer.self_attn.v_proj.weight.data should be {torch.Size((config.hidden_size, config.hidden_size))}'
        self_attn.query.weight.data = data2vec_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = data2vec_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = data2vec_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = data2vec_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = data2vec_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = data2vec_layer.self_attn.v_proj.bias
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == data2vec_layer.self_attn.out_proj.weight.shape, f'Shape for self_output.dense.weight should be {data2vec_layer.self_attn.out_proj.weight.shape}'
        self_output.dense.weight = data2vec_layer.self_attn.out_proj.weight
        self_output.dense.bias = data2vec_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = data2vec_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = data2vec_layer.self_attn_layer_norm.bias
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == data2vec_layer.fc1.weight.shape, f'Shape for intermediate.dense.weight should be {data2vec_layer.fc1.weight.shape}'
        intermediate.dense.weight = data2vec_layer.fc1.weight
        intermediate.dense.bias = data2vec_layer.fc1.bias
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == data2vec_layer.fc2.weight.shape, f'Shape for bert_output.dense.weight should be {data2vec_layer.fc2.weight.shape}'
        bert_output.dense.weight = data2vec_layer.fc2.weight
        bert_output.dense.bias = data2vec_layer.fc2.bias
        bert_output.LayerNorm.weight = data2vec_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = data2vec_layer.final_layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = data2vec.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = data2vec.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = data2vec.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = data2vec.model.classification_heads['mnli'].out_proj.bias
    else:
        model.lm_head.dense.weight = data2vec_model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = data2vec_model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = data2vec_model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = data2vec_model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = data2vec_model.encoder.lm_head.weight
        model.lm_head.decoder.bias = data2vec_model.encoder.lm_head.bias
    input_ids: torch.Tensor = data2vec.encode(SAMPLE_TEXT).unsqueeze(0)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = data2vec.model.classification_heads['mnli'](data2vec.extract_features(input_ids))
    else:
        their_output = data2vec_model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f'max_absolute_diff = {max_absolute_diff}')
    success = torch.allclose(our_output, their_output, atol=0.001)
    print('Do both models output the same tensors?', '🔥' if success else '💩')
    if not success:
        raise Exception('Something went wRoNg')
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.')
    parser.add_argument('--classification_head', action='store_true', help='Whether to convert a final classification head.')
    args = parser.parse_args()
    convert_data2vec_checkpoint_to_pytorch(args.checkpoint_path, args.pytorch_dump_folder_path, args.classification_head)

# File: transformers-main/src/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py
import argparse
import json
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.models import create_model
from transformers import BeitImageProcessor, Data2VecVisionConfig, Data2VecVisionForImageClassification, Data2VecVisionModel

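# Builds (old_name, new_name) pairs that map the BEiT/timm-style parameter names of the
# original checkpoint onto the Hugging Face Data2VecVision layout; extra pairs are added
# for the LM head (pretraining) or the classification head (fine-tuned) variants.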
def create_rename_keys(config, has_lm_head=False, is_semantic=False, hf_prefix='data2vec.'):
    prefix = 'backbone.' if is_semantic else ''
    rename_keys = []
    for i in range(config.num_hidden_layers):
        rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'{hf_prefix}encoder.layer.{i}.layernorm_before.weight'))
        rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'{hf_prefix}encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append((f'{prefix}blocks.{i}.attn.proj.weight', f'{hf_prefix}encoder.layer.{i}.attention.output.dense.weight'))
        rename_keys.append((f'{prefix}blocks.{i}.attn.proj.bias', f'{hf_prefix}encoder.layer.{i}.attention.output.dense.bias'))
        rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'{hf_prefix}encoder.layer.{i}.layernorm_after.weight'))
        rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'{hf_prefix}encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'{hf_prefix}encoder.layer.{i}.intermediate.dense.weight'))
        rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'{hf_prefix}encoder.layer.{i}.intermediate.dense.bias'))
        rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'{hf_prefix}encoder.layer.{i}.output.dense.weight'))
        rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'{hf_prefix}encoder.layer.{i}.output.dense.bias'))
    rename_keys.extend([(f'{prefix}cls_token', f'{hf_prefix}embeddings.cls_token'), (f'{prefix}patch_embed.proj.weight', f'{hf_prefix}embeddings.patch_embeddings.projection.weight'), (f'{prefix}patch_embed.proj.bias', f'{hf_prefix}embeddings.patch_embeddings.projection.bias')])
    if has_lm_head:
        rename_keys.extend([('mask_token', f'{hf_prefix}embeddings.mask_token'), ('rel_pos_bias.relative_position_bias_table', f'{hf_prefix}encoder.relative_position_bias.relative_position_bias_table'), ('rel_pos_bias.relative_position_index', f'{hf_prefix}encoder.relative_position_bias.relative_position_index'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias')])
    elif is_semantic:
        rename_keys.extend([('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias')])
    else:
        rename_keys.extend([('fc_norm.weight', f'{hf_prefix}pooler.layernorm.weight'), ('fc_norm.bias', f'{hf_prefix}pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias')])
    return rename_keys

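# The original checkpoint stores one fused qkv projection per block; this splits it into
# separate query/key/value weights (the key projection has no bias in the original, so
# only q_bias and v_bias are copied) and also moves the layer-scale gammas and relative
# position bias tables to their HF names.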
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False, hf_prefix='data2vec_vision.'):
    for i in range(config.num_hidden_layers):
        prefix = 'backbone.' if is_semantic else ''
        in_proj_weight = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight')
        q_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias')
        state_dict[f'{hf_prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[:config.hidden_size, :]
        state_dict[f'{hf_prefix}encoder.layer.{i}.attention.attention.query.bias'] = q_bias
        state_dict[f'{hf_prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size:config.hidden_size * 2, :]
        state_dict[f'{hf_prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size:, :]
        state_dict[f'{hf_prefix}encoder.layer.{i}.attention.attention.value.bias'] = v_bias
        gamma_1 = state_dict.pop(f'{prefix}blocks.{i}.gamma_1')
        gamma_2 = state_dict.pop(f'{prefix}blocks.{i}.gamma_2')
        state_dict[f'{hf_prefix}encoder.layer.{i}.lambda_1'] = gamma_1
        state_dict[f'{hf_prefix}encoder.layer.{i}.lambda_2'] = gamma_2
        if not has_lm_head:
            table = state_dict.pop(f'{prefix}blocks.{i}.attn.relative_position_bias_table')
            index = state_dict.pop(f'{prefix}blocks.{i}.attn.relative_position_index')
            state_dict[f'{hf_prefix}encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table'] = table
            state_dict[f'{hf_prefix}encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index'] = index

def get_args():
    parser = argparse.ArgumentParser('Convert Data2VecVision to HF for image classification and pretraining', add_help=False)
    parser.add_argument('--hf_checkpoint_name', type=str)
    parser.add_argument('--input_size', default=224, type=int, help='images input size')
    parser.add_argument('--beit_checkpoint', default='', help='beit checkpoint')
    return parser.parse_args()

def load_beit_model(args, is_finetuned, is_large):

    def load_state_dict(model, state_dict, prefix='', ignore_missing='relative_position_index'):
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for (name, child) in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        load(model, prefix=prefix)
        warn_missing_keys = []
        ignore_missing_keys = []
        for key in missing_keys:
            keep_flag = True
            for ignore_key in ignore_missing.split('|'):
                if ignore_key in key:
                    keep_flag = False
                    break
            if keep_flag:
                warn_missing_keys.append(key)
            else:
                ignore_missing_keys.append(key)
        missing_keys = warn_missing_keys
        if len(missing_keys) > 0:
            print('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            print('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, unexpected_keys))
        if len(ignore_missing_keys) > 0:
            print('Ignored weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, ignore_missing_keys))
        if len(error_msgs) > 0:
            print('\n'.join(error_msgs))
    model_kwargs = {'pretrained': False, 'use_shared_rel_pos_bias': True, 'use_abs_pos_emb': False, 'init_values': 0.1}
    if is_finetuned:
        model_kwargs.update({'num_classes': 1000, 'use_mean_pooling': True, 'init_scale': 0.001, 'use_rel_pos_bias': True})
    model = create_model('beit_large_patch16_224' if is_large else 'beit_base_patch16_224', **model_kwargs)
    patch_size = model.patch_embed.patch_size
    args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1])
    checkpoint = torch.load(args.beit_checkpoint, map_location='cpu')
    print(f'Load ckpt from {args.beit_checkpoint}')
    checkpoint_model = None
    for model_key in ('model', 'module'):
        if model_key in checkpoint:
            checkpoint_model = checkpoint[model_key]
            print(f'Load state_dict by model_key = {model_key}')
            break
    all_keys = list(checkpoint_model.keys())
    for key in all_keys:
        if 'relative_position_index' in key:
            checkpoint_model.pop(key)
        if 'relative_position_bias_table' in key:
            rel_pos_bias = checkpoint_model[key]
            (src_num_pos, num_attn_heads) = rel_pos_bias.size()
            (dst_num_pos, _) = model.state_dict()[key].size()
            dst_patch_shape = model.patch_embed.patch_shape
            if dst_patch_shape[0] != dst_patch_shape[1]:
                raise NotImplementedError()
    load_state_dict(model, checkpoint_model, prefix='')
    return model

def main():
    args = get_args()
    is_finetuned = 'ft1k' in args.hf_checkpoint_name
    is_large = 'large' in args.hf_checkpoint_name
    if is_finetuned:
        import modeling_finetune
    else:
        import modeling_cyclical
    config = Data2VecVisionConfig()
    if is_finetuned:
        config.use_relative_position_bias = True
        config.use_shared_relative_position_bias = False
        config.use_mean_pooling = True
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        with open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r') as f:
            id2label = json.load(f)
        id2label = {int(k): v for (k, v) in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for (k, v) in id2label.items()}
    else:
        config.use_relative_position_bias = False
        config.use_shared_relative_position_bias = True
        config.use_mean_pooling = False
    if is_large:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    orig_model = load_beit_model(args, is_finetuned, is_large)
    orig_model.eval()
    image_processor = BeitImageProcessor(size=config.image_size, do_center_crop=False)
    image = Image.open('../../../../tests/fixtures/tests_samples/COCO/000000039769.png')
    encoding = image_processor(images=image, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    orig_args = (pixel_values,) if is_finetuned else (pixel_values, None)
    with torch.no_grad():
        orig_model_output = orig_model(*orig_args)
    if is_finetuned:
        hf_model = Data2VecVisionForImageClassification(config)
        hf_model.eval()
        has_lm_head = False
        hf_prefix = 'data2vec_vision.'
    else:
        hf_model = Data2VecVisionModel(config)
        hf_model.eval()
        has_lm_head = True
        hf_prefix = ''
    rename_keys = create_rename_keys(config, hf_prefix=hf_prefix, has_lm_head=has_lm_head)
    state_dict = orig_model.state_dict()
    for (src, dest) in rename_keys:
        val = state_dict.pop(src)
        state_dict[dest] = val
    read_in_q_k_v(state_dict, config, hf_prefix=hf_prefix, has_lm_head=has_lm_head)
    (missing_keys, unexpected_keys) = hf_model.load_state_dict(state_dict, strict=False)
    print('HF missing', missing_keys)
    print('HF unexpected_keys', unexpected_keys)
    with torch.no_grad():
        hf_model_output = hf_model(pixel_values)
    hf_output = hf_model_output.logits if is_finetuned else hf_model_output.last_hidden_state
    max_absolute_diff = torch.max(torch.abs(hf_output - orig_model_output)).item()
    print(f'max_absolute_diff = {max_absolute_diff}')
    success = torch.allclose(hf_output, orig_model_output, atol=0.001)
    print('Do both models output the same tensors?', '🔥' if success else '💩')
    if not success:
        raise ValueError(f'Original and HF model outputs do not match (max_absolute_diff={max_absolute_diff})')
    print(f'Saving to {args.hf_checkpoint_name}')
    hf_model.save_pretrained(args.hf_checkpoint_name)
    image_processor.save_pretrained(args.hf_checkpoint_name)
if __name__ == '__main__':
    main()
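
# Illustrative sketch (comments only): a typical invocation of this conversion script. The script
# filename and checkpoint path below are placeholders, not values taken from the repository.
#
#   python convert_data2vec_vision_to_hf.py \
#       --beit_checkpoint /path/to/data2vec_vision_checkpoint.pt \
#       --hf_checkpoint_name data2vec-vision-base-ft1k \
#       --input_size 224
#
# A `--hf_checkpoint_name` containing "ft1k" selects the fine-tuned image-classification path
# (1000 ImageNet labels), and one containing "large" selects the large architecture.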

# File: transformers-main/src/transformers/models/data2vec/modeling_data2vec_audio.py
""""""
import math
import warnings
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, is_peft_available, logging
from .configuration_data2vec_audio import Data2VecAudioConfig
if is_flash_attn_2_available():
    from ...modeling_flash_attention_utils import _flash_attention_forward
logger = logging.get_logger(__name__)
_HIDDEN_STATES_START_POSITION = 2
_CONFIG_FOR_DOC = 'Data2VecAudioConfig'
_CHECKPOINT_FOR_DOC = 'facebook/data2vec-audio-base-960h'
_EXPECTED_OUTPUT_SHAPE = [1, 292, 768]
_CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'"
_CTC_EXPECTED_LOSS = 66.95

def _compute_mask_indices(shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor]=None, min_masks: int=0) -> np.ndarray:
    (batch_size, sequence_length) = shape
    if mask_length < 1:
        raise ValueError('`mask_length` has to be bigger than 0.')
    if mask_length > sequence_length:
        raise ValueError(f'`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}')
    epsilon = np.random.rand(1).item()

    def compute_num_masked_span(input_length):
        num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
        num_masked_span = max(num_masked_span, min_masks)
        if num_masked_span * mask_length > sequence_length:
            num_masked_span = sequence_length // mask_length
        if input_length - (mask_length - 1) < num_masked_span:
            num_masked_span = max(input_length - (mask_length - 1), 0)
        return num_masked_span
    input_lengths = attention_mask.sum(-1).detach().tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)]
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
    spec_aug_mask_idxs = []
    max_num_masked_span = compute_num_masked_span(sequence_length)
    if max_num_masked_span == 0:
        return spec_aug_mask
    for input_length in input_lengths:
        num_masked_span = compute_num_masked_span(input_length)
        spec_aug_mask_idx = np.random.choice(np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False)
        if len(spec_aug_mask_idx) == 0:
            dummy_mask_idx = sequence_length - 1
        else:
            dummy_mask_idx = spec_aug_mask_idx[0]
        spec_aug_mask_idx = np.concatenate([spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx])
        spec_aug_mask_idxs.append(spec_aug_mask_idx)
    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
    spec_aug_mask_idxs = np.broadcast_to(spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length))
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(batch_size, max_num_masked_span * mask_length)
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
    if spec_aug_mask_idxs.max() > sequence_length - 1:
        spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
    return spec_aug_mask
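
# Illustrative sketch of `_compute_mask_indices`: it draws SpecAugment-style time masks as a
# boolean numpy array of shape `(batch_size, sequence_length)`. The sizes and probabilities below
# are arbitrary example values.
def _example_compute_mask_indices():
    mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.05, mask_length=10, min_masks=2)
    # Each row contains spans of 10 consecutive masked frames; with `min_masks=2`, at least two
    # (possibly overlapping) spans are drawn per row.
    return torch.tensor(mask, dtype=torch.bool)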

class Data2VecAudioConvLayer(nn.Module):

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias)
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.activation(hidden_states)
        return hidden_states

class Data2VecAudioPadLayer(nn.Module):

    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0

    def forward(self, hidden_states):
        if self.num_pad_remove > 0:
            hidden_states = hidden_states[:, :, :-self.num_pad_remove]
        return hidden_states

class Data2VecAudioPositionalConvLayer(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size=config.conv_pos_kernel_size, padding=config.conv_pos_kernel_size // 2, groups=config.num_conv_pos_embedding_groups)
        self.padding = Data2VecAudioPadLayer(config.conv_pos_kernel_size)
        self.activation = ACT2FN[config.feat_extract_activation]
        self.layer_norm = nn.LayerNorm(config.hidden_size, elementwise_affine=False)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states = self.activation(hidden_states)
        return hidden_states

class Data2VecAudioPositionalConvEmbedding(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.layers = nn.ModuleList([Data2VecAudioPositionalConvLayer(config) for _ in range(config.num_conv_pos_embeddings)])

    def forward(self, hidden_states):
        hidden_states = hidden_states.transpose(1, 2)
        for layer in self.layers:
            hidden_states = layer(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states

class Data2VecAudioFeatureEncoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.conv_layers = nn.ModuleList([Data2VecAudioConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)])
        self.gradient_checkpointing = False
        self._requires_grad = True

    def _freeze_parameters(self):
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False

    def forward(self, input_values):
        hidden_states = input_values[:, None]
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True
        for conv_layer in self.conv_layers:
            if self._requires_grad and self.gradient_checkpointing and self.training:
                hidden_states = self._gradient_checkpointing_func(conv_layer.__call__, hidden_states)
            else:
                hidden_states = conv_layer(hidden_states)
        return hidden_states

class Data2VecAudioFeatureProjection(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        norm_hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.projection(norm_hidden_states)
        hidden_states = self.dropout(hidden_states)
        return (hidden_states, norm_hidden_states)

class Data2VecAudioAttention(nn.Module):

    def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[Data2VecAudioConfig]=None):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
        self.scaling = self.head_dim ** (-0.5)
        self.is_decoder = is_decoder
        self.is_causal = is_causal
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        is_cross_attention = key_value_states is not None
        (bsz, tgt_len, _) = hidden_states.size()
        query_states = self.q_proj(hidden_states) * self.scaling
        if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]):
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            past_key_value = (key_states, value_states)
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}')
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}')
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}')
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if output_attentions:
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}')
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)
        return (attn_output, attn_weights_reshaped, past_key_value)

class Data2VecAudioFlashAttention2(Data2VecAudioAttention):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim)

    def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            raise ValueError('Data2VecAudioFlashAttention2 attention does not support output_attentions')
        is_cross_attention = key_value_states is not None
        (bsz, q_len, _) = hidden_states.size()
        query_states = self._reshape(self.q_proj(hidden_states), -1, bsz)
        if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]):
            key_states = past_key_value[0].transpose(1, 2)
            value_states = past_key_value[1].transpose(1, 2)
        elif is_cross_attention:
            key_states = self._reshape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._reshape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1)
            value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1)
        else:
            key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2))
        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            elif hasattr(self.config, '_pre_quantization_dtype'):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype
            logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.')
            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)
        attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask)
        attn_output = attn_output.reshape(bsz, q_len, -1)
        attn_output = self.out_proj(attn_output)
        if not output_attentions:
            attn_weights = None
        return (attn_output, attn_weights, past_key_value)

class Data2VecAudioSdpaAttention(Data2VecAudioAttention):

    def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions or layer_head_mask is not None:
            logger.warning_once('Data2VecAudioModel is using Data2VecAudioSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.')
            return super().forward(hidden_states, key_value_states=key_value_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)
        is_cross_attention = key_value_states is not None
        (bsz, tgt_len, _) = hidden_states.size()
        query_states = self.q_proj(hidden_states)
        if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]):
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            past_key_value = (key_states, value_states)
        query_states = self._shape(query_states, tgt_len, bsz)
        is_causal = self.is_causal and attention_mask is None and tgt_len > 1
        attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.dropout if self.training else 0.0, is_causal=is_causal)
        if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
            raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}')
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)
        return (attn_output, None, past_key_value)
DATA2VEC2AUDIO_ATTENTION_CLASSES = {'eager': Data2VecAudioAttention, 'sdpa': Data2VecAudioSdpaAttention, 'flash_attention_2': Data2VecAudioFlashAttention2}
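
# Illustrative sketch: the mapping above is keyed by `config._attn_implementation`, which users
# normally control through `from_pretrained(..., attn_implementation='sdpa')`. Instantiating the
# class directly, as done here with arbitrary sizes, is only for illustration.
def _example_select_attention_implementation():
    attention_cls = DATA2VEC2AUDIO_ATTENTION_CLASSES['sdpa']
    return attention_cls(embed_dim=768, num_heads=12, dropout=0.1)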

class Data2VecAudioFeedForward(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)
        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        hidden_states = self.intermediate_dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.intermediate_dropout(hidden_states)
        hidden_states = self.output_dense(hidden_states)
        hidden_states = self.output_dropout(hidden_states)
        return hidden_states

class Data2VecAudioEncoderLayer(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.attention = DATA2VEC2AUDIO_ATTENTION_CLASSES[config._attn_implementation](embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = Data2VecAudioFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        attn_residual = hidden_states
        (hidden_states, attn_weights, _) = self.attention(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
        hidden_states = self.dropout(hidden_states)
        hidden_states = attn_residual + hidden_states
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states + self.feed_forward(hidden_states)
        hidden_states = self.final_layer_norm(hidden_states)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs

class Data2VecAudioEncoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = Data2VecAudioPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([Data2VecAudioEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
        self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2'

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        if attention_mask is not None:
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_attention_mask] = 0
            if self._use_flash_attention_2:
                attention_mask = attention_mask if attention_mask is not None and 0 in attention_mask else None
            else:
                attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
                attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
                attention_mask = attention_mask.expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            dropout_probability = torch.rand([])
            skip_the_layer = True if self.training and dropout_probability < self.config.layerdrop else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(layer.__call__, hidden_states, attention_mask, output_attentions)
                else:
                    layer_outputs = layer(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
                hidden_states = layer_outputs[0]
            if skip_the_layer:
                layer_outputs = (None, None)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)

class Data2VecAudioAdapter(nn.Module):

    def __init__(self, config):
        super().__init__()
        if config.output_hidden_size != config.hidden_size:
            self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
            self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
        else:
            self.proj = self.proj_layer_norm = None
        self.layers = nn.ModuleList((Data2VecAudioAdapterLayer(config) for _ in range(config.num_adapter_layers)))
        self.layerdrop = config.layerdrop

    def forward(self, hidden_states):
        if self.proj is not None and self.proj_layer_norm is not None:
            hidden_states = self.proj(hidden_states)
            hidden_states = self.proj_layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)
        for layer in self.layers:
            layerdrop_prob = np.random.random()
            if not self.training or layerdrop_prob > self.layerdrop:
                hidden_states = layer(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states

class Data2VecAudioAdapterLayer(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(config.output_hidden_size, 2 * config.output_hidden_size, config.adapter_kernel_size, stride=config.adapter_stride, padding=1)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = nn.functional.glu(hidden_states, dim=1)
        return hidden_states

class Data2VecAudioPreTrainedModel(PreTrainedModel):
    config_class = Data2VecAudioConfig
    base_model_prefix = 'data2vec_audio'
    main_input_name = 'input_values'
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module):
        if isinstance(module, Data2VecAudioFeatureProjection):
            k = math.sqrt(1 / module.projection.in_features)
            nn.init.uniform_(module.projection.weight, a=-k, b=k)
            nn.init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, Data2VecAudioPositionalConvLayer):
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            if module.bias is not None:
                module.bias.data.zero_()
            if module.weight is not None:
                module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool]=None):
        add_adapter = self.config.add_adapter if add_adapter is None else add_adapter

        def _conv_out_length(input_length, kernel_size, stride):
            return torch.div(input_length - kernel_size, stride, rounding_mode='floor') + 1
        for (kernel_size, stride) in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
        if add_adapter:
            for _ in range(self.config.num_adapter_layers):
                input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)
        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None):
        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
        output_lengths = output_lengths.to(torch.long)
        batch_size = attention_mask.shape[0]
        attention_mask = torch.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device)
        attention_mask[torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask
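
# Illustrative sketch of `_get_feat_extract_output_lengths`: each feature-extractor convolution
# shrinks the time axis to floor((length - kernel) / stride) + 1. The kernel/stride values below
# are assumed example values (the real ones come from `config.conv_kernel` / `config.conv_stride`).
def _example_feat_extract_output_length(input_length: int=16000) -> int:
    conv_kernel = (10, 3, 3, 3, 3, 2, 2)
    conv_stride = (5, 2, 2, 2, 2, 2, 2)
    for (kernel_size, stride) in zip(conv_kernel, conv_stride):
        input_length = (input_length - kernel_size) // stride + 1
    return input_length  # 16000 samples (1 s at 16 kHz) -> 49 frames with these values
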
DATA2VEC_AUDIO_START_DOCSTRING = '\n    Data2VecAudio was proposed in [data2vec: A General Framework for Self-supervised Learning in Speech, Vision and\n    Language](https://arxiv.org/pdf/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and\n    Michael Auli.\n\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving etc.).\n\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`Data2VecAudioConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
DATA2VEC_AUDIO_INPUTS_DOCSTRING = '\n    Args:\n        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n            Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file\n            into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install\n            soundfile*). To prepare the array into *input_values*, the [`AutoProcessor`] should be used for padding and\n            conversion into a tensor of type *torch.FloatTensor*. See [`Wav2Vec2Processor.__call__`] for details.\n        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,\n            1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n            \n\n            `attention_mask` should be passed if the corresponding processor has `config.return_attention_mask ==\n            True`, which is the case for all pre-trained Data2Vec Audio models. Be aware that that even with\n            `attention_mask`, zero-padded inputs will have slightly different outputs compared to non-padded inputs\n            because there are more than one convolutional layer in the positional encodings. For a more detailed\n            explanation, see [here](https://github.com/huggingface/transformers/issues/25621#issuecomment-1713759349).\n\n            \n\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'

@add_start_docstrings('The bare Data2VecAudio Model transformer outputting raw hidden-states without any specific head on top.', DATA2VEC_AUDIO_START_DOCSTRING)
class Data2VecAudioModel(Data2VecAudioPreTrainedModel):

    def __init__(self, config: Data2VecAudioConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = Data2VecAudioFeatureEncoder(config)
        self.feature_projection = Data2VecAudioFeatureProjection(config)
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
        self.encoder = Data2VecAudioEncoder(config)
        self.adapter = Data2VecAudioAdapter(config) if config.add_adapter else None
        self.post_init()

    def freeze_feature_encoder(self):
        self.feature_extractor._freeze_parameters()

    def _mask_hidden_states(self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None):
        if not getattr(self.config, 'apply_spec_augment', True):
            return hidden_states
        (batch_size, sequence_length, hidden_size) = hidden_states.size()
        if mask_time_indices is not None:
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks)
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        if self.config.mask_feature_prob > 0 and self.training:
            mask_feature_indices = _compute_mask_indices((batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks)
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0
        return hidden_states

    @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=Wav2Vec2BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality='audio', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, mask_time_indices: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        extract_features = self.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)
        if attention_mask is not None:
            attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask, add_adapter=False)
        (hidden_states, extract_features) = self.feature_projection(extract_features)
        hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask)
        encoder_outputs = self.encoder(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = encoder_outputs[0]
        if self.adapter is not None:
            hidden_states = self.adapter(hidden_states)
        if not return_dict:
            return (hidden_states, extract_features) + encoder_outputs[1:]
        return Wav2Vec2BaseModelOutput(last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
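
# Illustrative sketch: running the bare model on one second of silence. The checkpoint name reuses
# `_CHECKPOINT_FOR_DOC`; downloading it (and the matching processor) is assumed to be possible.
def _example_data2vec_audio_model():
    from transformers import AutoProcessor
    processor = AutoProcessor.from_pretrained(_CHECKPOINT_FOR_DOC)
    model = Data2VecAudioModel.from_pretrained(_CHECKPOINT_FOR_DOC)
    waveform = [0.0] * 16000
    inputs = processor(waveform, sampling_rate=16000, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.last_hidden_state  # shape (batch, frames, hidden_size), e.g. (1, 49, 768)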

@add_start_docstrings('Data2VecAudio Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).', DATA2VEC_AUDIO_START_DOCSTRING)
class Data2VecAudioForCTC(Data2VecAudioPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.data2vec_audio = Data2VecAudioModel(config)
        self.dropout = nn.Dropout(config.final_dropout)
        if config.vocab_size is None:
            raise ValueError(f"You are trying to instantiate {self.__class__} with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `Data2VecAudioForCTC.from_pretrained(..., vocab_size=vocab_size)`, or define `vocab_size` in your model's configuration.")
        output_hidden_size = config.output_hidden_size if hasattr(config, 'add_adapter') and config.add_adapter else config.hidden_size
        self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
        self.post_init()

    def freeze_feature_extractor(self):
        warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning)
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        self.data2vec_audio.feature_extractor._freeze_parameters()

    @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS)
    def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[Tuple, CausalLMOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None and labels.max() >= self.config.vocab_size:
            raise ValueError(f'Label values must be <= vocab_size: {self.config.vocab_size}')
        outputs = self.data2vec_audio(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            attention_mask = attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity)
        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
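
# Illustrative sketch of CTC transcription with the head above, mirroring `_CHECKPOINT_FOR_DOC`
# and `_CTC_EXPECTED_OUTPUT`. `waveform` is a placeholder for a 16 kHz float array.
def _example_data2vec_audio_ctc(waveform):
    from transformers import AutoProcessor
    processor = AutoProcessor.from_pretrained(_CHECKPOINT_FOR_DOC)
    model = Data2VecAudioForCTC.from_pretrained(_CHECKPOINT_FOR_DOC)
    inputs = processor(waveform, sampling_rate=16000, return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    return processor.batch_decode(predicted_ids)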

@add_start_docstrings('\n    Data2VecAudio Model with a sequence classification head on top (a linear layer over the pooled output) for tasks\n    like SUPERB Keyword Spotting.\n    ', DATA2VEC_AUDIO_START_DOCSTRING)
class Data2VecAudioForSequenceClassification(Data2VecAudioPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        if hasattr(config, 'add_adapter') and config.add_adapter:
            raise ValueError('Sequence classification does not support the use of Data2VecAudio adapters (config.add_adapter=True)')
        self.data2vec_audio = Data2VecAudioModel(config)
        num_layers = config.num_hidden_layers + 1
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
        self.post_init()

    def freeze_feature_extractor(self):
        warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning)
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        self.data2vec_audio.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        for param in self.data2vec_audio.parameters():
            param.requires_grad = False

    @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality='audio')
    def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[Tuple, SequenceClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
        outputs = self.data2vec_audio(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]
        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            pooled_output = hidden_states.mean(dim=1)
        else:
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            hidden_states[~padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

@add_start_docstrings('\n    Data2VecAudio Model with a frame classification head on top for tasks like Speaker Diarization.\n    ', DATA2VEC_AUDIO_START_DOCSTRING)
class Data2VecAudioForAudioFrameClassification(Data2VecAudioPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        if hasattr(config, 'add_adapter') and config.add_adapter:
            raise ValueError('Audio frame classification does not support the use of Data2VecAudio adapters (config.add_adapter=True)')
        self.data2vec_audio = Data2VecAudioModel(config)
        num_layers = config.num_hidden_layers + 1
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.num_labels = config.num_labels
        self.init_weights()

    def freeze_feature_extractor(self):
        warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning)
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        self.data2vec_audio.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        for param in self.data2vec_audio.parameters():
            param.requires_grad = False

    @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, modality='audio')
    def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
        outputs = self.data2vec_audio(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]
        logits = self.classifier(hidden_states)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1))
        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return (loss,) + output if loss is not None else output
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

class AMSoftmaxLoss(nn.Module):

    def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):
        super(AMSoftmaxLoss, self).__init__()
        self.scale = scale
        self.margin = margin
        self.num_labels = num_labels
        self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)
        self.loss = nn.CrossEntropyLoss()

    def forward(self, hidden_states, labels):
        labels = labels.flatten()
        weight = nn.functional.normalize(self.weight, dim=0)
        hidden_states = nn.functional.normalize(hidden_states, dim=1)
        cos_theta = torch.mm(hidden_states, weight)
        psi = cos_theta - self.margin
        onehot = nn.functional.one_hot(labels, self.num_labels)
        logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)
        loss = self.loss(logits, labels)
        return loss
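
# Illustrative sketch of `AMSoftmaxLoss` with random example data: the loss is cross-entropy over
# scaled cosine similarities between L2-normalised embeddings and class weights, with `margin`
# subtracted from the target-class logit.
def _example_am_softmax_loss():
    criterion = AMSoftmaxLoss(input_dim=512, num_labels=10)
    hidden_states = torch.randn(4, 512)
    labels = torch.randint(0, 10, (4,))
    return criterion(hidden_states, labels)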

class TDNNLayer(nn.Module):

    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
        self.out_conv_dim = config.tdnn_dim[layer_id]
        self.kernel_size = config.tdnn_kernel[layer_id]
        self.dilation = config.tdnn_dilation[layer_id]
        self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
        self.activation = nn.ReLU()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        if is_peft_available():
            from peft.tuners.lora import LoraLayer
            if isinstance(self.kernel, LoraLayer):
                warnings.warn("Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. You should exclude TDNNLayer from LoRA's target modules.")
        hidden_states = hidden_states.transpose(1, 2)
        weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2)
        hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation)
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states = self.activation(hidden_states)
        return hidden_states
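
# Illustrative sketch of the TDNN trick above: the `nn.Linear` weight of shape
# (out_dim, in_dim * kernel_size) is viewed as a Conv1d kernel of shape (out_dim, in_dim, kernel_size),
# so a time-delay layer is just a (possibly dilated) 1-D convolution over the frame axis. Sizes below
# are arbitrary example values.
def _example_tdnn_as_conv1d():
    (in_dim, out_dim, kernel_size, dilation) = (512, 512, 5, 1)
    linear = nn.Linear(in_dim * kernel_size, out_dim)
    frames = torch.randn(1, in_dim, 100)
    weight = linear.weight.view(out_dim, kernel_size, in_dim).transpose(1, 2)
    return nn.functional.conv1d(frames, weight, linear.bias, dilation=dilation)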

@add_start_docstrings('\n    Data2VecAudio Model with an XVector feature extraction head on top for tasks like Speaker Verification.\n    ', DATA2VEC_AUDIO_START_DOCSTRING)
class Data2VecAudioForXVector(Data2VecAudioPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.data2vec_audio = Data2VecAudioModel(config)
        num_layers = config.num_hidden_layers + 1
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])
        tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
        self.tdnn = nn.ModuleList(tdnn_layers)
        self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
        self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)
        self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)
        self.init_weights()

    def freeze_feature_extractor(self):
        warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning)
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        self.data2vec_audio.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        for param in self.data2vec_audio.parameters():
            param.requires_grad = False

    def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):

        def _conv_out_length(input_length, kernel_size, stride):
            return (input_length - kernel_size) // stride + 1
        for kernel_size in self.config.tdnn_kernel:
            input_lengths = _conv_out_length(input_lengths, kernel_size, 1)
        return input_lengths

    @add_start_docstrings_to_model_forward(DATA2VEC_AUDIO_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=XVectorOutput, config_class=_CONFIG_FOR_DOC, modality='audio')
    def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[Tuple, XVectorOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
        outputs = self.data2vec_audio(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]
        hidden_states = self.projector(hidden_states)
        for tdnn_layer in self.tdnn:
            hidden_states = tdnn_layer(hidden_states)
        if attention_mask is None:
            mean_features = hidden_states.mean(dim=1)
            std_features = hidden_states.std(dim=1)
        else:
            feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
            tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
            mean_features = []
            std_features = []
            for (i, length) in enumerate(tdnn_output_lengths):
                mean_features.append(hidden_states[i, :length].mean(dim=0))
                std_features.append(hidden_states[i, :length].std(dim=0))
            mean_features = torch.stack(mean_features)
            std_features = torch.stack(std_features)
        statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
        output_embeddings = self.feature_extractor(statistic_pooling)
        logits = self.classifier(output_embeddings)
        loss = None
        if labels is not None:
            loss = self.objective(logits, labels)
        if not return_dict:
            output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
            return (loss,) + output if loss is not None else output
        return XVectorOutput(loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
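
# Illustrative shape-check sketch (not part of the original source): a randomly initialized
# model built from a default config, run on fake audio, just to show the x-vector output shape.
# The label count below is hypothetical.
#
#     import torch
#     from transformers import Data2VecAudioConfig, Data2VecAudioForXVector
#
#     config = Data2VecAudioConfig(num_labels=10)
#     model = Data2VecAudioForXVector(config).eval()
#     waveform = torch.randn(1, 16000)               # one second of fake 16 kHz audio
#     with torch.no_grad():
#         out = model(input_values=waveform)
#     print(out.embeddings.shape)                    # (1, config.xvector_output_dim)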

# File: transformers-main/src/transformers/models/data2vec/modeling_data2vec_text.py
""""""
import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN, gelu
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_data2vec_text import Data2VecTextConfig
logger = logging.get_logger(__name__)
_HIDDEN_STATES_START_POSITION = 2
_CHECKPOINT_FOR_DOC = 'facebook/data2vec-text-base'
_CONFIG_FOR_DOC = 'Data2VecTextConfig'

class Data2VecTextForTextEmbeddings(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
        self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
        self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
        if position_ids is None:
            if input_ids is not None:
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if token_type_ids is None:
            if hasattr(self, 'token_type_ids'):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == 'absolute':
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]
        position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
        return position_ids.unsqueeze(0).expand(input_shape)

class Data2VecTextSelfAttention(nn.Module):

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
        if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_value is not None:
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        use_cache = past_key_value is not None
        if self.is_decoder:
            past_key_value = (key_layer, value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
            (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2])
            if use_cache:
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
            if self.position_embedding_type == 'relative_key':
                relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == 'relative_key_query':
                relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
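
# Note on the relative-position branch above (added comment, not in the original source): for
# 'relative_key' and 'relative_key_query', the distance between query position l and key
# position r is embedded as E(l - r) and combined with the content scores:
#     scores[b, h, l, r] += q[b, h, l, :] . E(l - r)                                  ('relative_key')
#     scores[b, h, l, r] += q[b, h, l, :] . E(l - r) + k[b, h, r, :] . E(l - r)       ('relative_key_query')
# before the usual 1/sqrt(head_size) scaling, masking and softmax.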

class Data2VecTextSelfOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
DATA2VEC_TEXT_SELF_ATTENTION_CLASSES = {'eager': Data2VecTextSelfAttention}

class Data2VecTextAttention(nn.Module):

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = DATA2VEC_TEXT_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type)
        self.output = Data2VecTextSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]:
        self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]
        return outputs

class Data2VecTextIntermediate(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

class Data2VecTextOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

class Data2VecTextLayer(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Data2VecTextAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f'{self} should be used as a decoder model if cross attention is added')
            self.crossattention = Data2VecTextAttention(config, position_embedding_type='absolute')
        self.intermediate = Data2VecTextIntermediate(config)
        self.output = Data2VecTextOutput(config)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]:
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value)
        attention_output = self_attention_outputs[0]
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]
        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, 'crossattention'):
                raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions)
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value
        layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
        outputs = (layer_output,) + outputs
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output

class Data2VecTextEncoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([Data2VecTextLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
                use_cache = False
        next_decoder_cache = () if use_cache else None
        for (i, layer_module) in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions)
            else:
                layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions)
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
        return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)

class Data2VecTextPooler(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output

class Data2VecTextPreTrainedModel(PreTrainedModel):
    config_class = Data2VecTextConfig
    base_model_prefix = 'data2vec_text'
    supports_gradient_checkpointing = True
    _no_split_modules = ['Data2VecTextForTextEmbeddings', 'Data2VecTextLayer']

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            if hasattr(module, 'bias') and module.bias is not None:
                module.bias.data.zero_()
            if hasattr(module, 'weight') and module.weight is not None:
                module.weight.data.fill_(1.0)
DATA2VECTEXT_START_DOCSTRING = '\n    Data2VecText was proposed in [data2vec: A General Framework for Self-supervised Learning in Speech, Vision and\n    Language](https://arxiv.org/pdf/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu and\n    Michael Auli.\n\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`Data2VecTextConfig`]): Model configuration class with all the parameters of the\n            model. Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
DATA2VECTEXT_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`torch.LongTensor` of shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n            1]`:\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.max_position_embeddings - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"

@add_start_docstrings('The bare Data2VecText Model transformer outputting raw hidden-states without any specific head on top.', DATA2VECTEXT_START_DOCSTRING)
class Data2VecTextModel(Data2VecTextPreTrainedModel):

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = Data2VecTextForTextEmbeddings(config)
        self.encoder = Data2VecTextEncoder(config)
        self.pooler = Data2VecTextPooler(config) if add_pooling_layer else None
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for (layer, heads) in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        (batch_size, seq_length) = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
        if token_type_ids is None:
            if hasattr(self.embeddings, 'token_type_ids'):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
        if self.config.is_decoder and encoder_hidden_states is not None:
            (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
        encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
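
# Illustrative usage sketch (not part of the original source), using the checkpoint already
# referenced by _CHECKPOINT_FOR_DOC above:
#
#     import torch
#     from transformers import AutoTokenizer, Data2VecTextModel
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
#     model = Data2VecTextModel.from_pretrained("facebook/data2vec-text-base")
#     inputs = tokenizer("Hello, data2vec!", return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     print(outputs.last_hidden_state.shape)         # (1, sequence_length, hidden_size)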

@add_start_docstrings('Data2VecText Model with a `language modeling` head on top for CLM fine-tuning.', DATA2VECTEXT_START_DOCSTRING)
class Data2VecTextForCausalLM(Data2VecTextPreTrainedModel):
    _tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias']

    def __init__(self, config):
        super().__init__(config)
        if not config.is_decoder:
            logger.warning('If you want to use `Data2VecTextForCausalLM` as a standalone, add `is_decoder=True`.')
        self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
        self.lm_head = Data2VecTextLMHead(config)
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if labels is not None:
            use_cache = False
        outputs = self.data2vec_text(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)
        lm_loss = None
        if labels is not None:
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            labels = labels.to(shifted_prediction_scores.device)
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return (lm_loss,) + output if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                remove_prefix_length = input_ids.shape[1] - 1
            input_ids = input_ids[:, remove_prefix_length:]
        return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values}

    def _reorder_cache(self, past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),)
        return reordered_past
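
# Illustrative sketch (not part of the original source): the base checkpoint is an encoder, so
# for causal LM the config has to be switched to decoder mode first, mirroring the warning
# emitted in __init__ above.
#
#     from transformers import AutoTokenizer, Data2VecTextConfig, Data2VecTextForCausalLM
#
#     config = Data2VecTextConfig.from_pretrained("facebook/data2vec-text-base")
#     config.is_decoder = True
#     tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
#     model = Data2VecTextForCausalLM.from_pretrained("facebook/data2vec-text-base", config=config)
#     outputs = model(**tokenizer("Hello", return_tensors="pt"))
#     print(outputs.logits.shape)                    # (1, sequence_length, vocab_size)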

@add_start_docstrings('Data2VecText Model with a `language modeling` head on top.', DATA2VECTEXT_START_DOCSTRING)
class Data2VecTextForMaskedLM(Data2VecTextPreTrainedModel):
    _tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias']

    def __init__(self, config):
        super().__init__(config)
        if config.is_decoder:
            logger.warning('If you want to use `Data2VecTextForMaskedLM`, make sure `config.is_decoder=False` for bi-directional self-attention.')
        self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
        self.lm_head = Data2VecTextLMHead(config)
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='<mask>')
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MaskedLMOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.data2vec_text(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            labels = labels.to(prediction_scores.device)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return (masked_lm_loss,) + output if masked_lm_loss is not None else output
        return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
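
# Illustrative masked-LM sketch (not part of the original source): predict the most likely
# token at a masked position with the documented checkpoint.
#
#     import torch
#     from transformers import AutoTokenizer, Data2VecTextForMaskedLM
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
#     model = Data2VecTextForMaskedLM.from_pretrained("facebook/data2vec-text-base")
#     inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
#     print(tokenizer.decode(logits[0, mask_index].argmax(dim=-1)))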

class Data2VecTextLMHead(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias

    def forward(self, features, **kwargs):
        x = self.dense(features)
        x = gelu(x)
        x = self.layer_norm(x)
        x = self.decoder(x)
        return x

    def _tie_weights(self):
        if self.decoder.bias.device.type == 'meta':
            self.decoder.bias = self.bias
        else:
            self.bias = self.decoder.bias

@add_start_docstrings('\n    Data2VecText Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n    pooled output) e.g. for GLUE tasks.\n    ', DATA2VECTEXT_START_DOCSTRING)
class Data2VecTextForSequenceClassification(Data2VecTextPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
        self.classifier = Data2VecTextClassificationHead(config)
        self.post_init()

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.data2vec_text(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
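
# Note on the loss selection above (added comment, not in the original source): when
# config.problem_type is unset, it is inferred from the labels - num_labels == 1 means
# regression (MSELoss), integer labels with num_labels > 1 mean single-label classification
# (CrossEntropyLoss), and anything else (e.g. float multi-hot targets) is treated as
# multi-label classification (BCEWithLogitsLoss).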

@add_start_docstrings('\n    Data2VecText Model with a multiple choice classification head on top (a linear layer on top of the pooled output\n    and a softmax) e.g. for RocStories/SWAG tasks.\n    ', DATA2VECTEXT_START_DOCSTRING)
class Data2VecTextForMultipleChoice(Data2VecTextPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.data2vec_text = Data2VecTextModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.post_init()

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MultipleChoiceModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
        outputs = self.data2vec_text(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            labels = labels.to(reshaped_logits.device)
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

@add_start_docstrings('\n    Data2VecText Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.\n    for Named-Entity-Recognition (NER) tasks.\n    ', DATA2VECTEXT_START_DOCSTRING)
class Data2VecTextForTokenClassification(Data2VecTextPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.data2vec_text(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            labels = labels.to(logits.device)
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

class Data2VecTextClassificationHead(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        x = features[:, 0, :]
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x

@add_start_docstrings('\n    Data2VecText Model with a span classification head on top for extractive question-answering tasks like SQuAD (a\n    linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', DATA2VECTEXT_START_DOCSTRING)
class Data2VecTextForQuestionAnswering(Data2VecTextPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(DATA2VECTEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.data2vec_text(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        (start_logits, end_logits) = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return (total_loss,) + output if total_loss is not None else output
        return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx
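
# Worked micro-example (added comment, not in the original source): with padding_idx = 1,
# real tokens are numbered padding_idx + 1, padding_idx + 2, ... while padding positions keep
# padding_idx itself:
#
#     create_position_ids_from_input_ids(torch.tensor([[5, 6, 7, 1, 1]]), padding_idx=1)
#     # -> tensor([[2, 3, 4, 1, 1]])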

# File: transformers-main/src/transformers/models/data2vec/modeling_data2vec_vision.py
""""""
import collections.abc
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int
from .configuration_data2vec_vision import Data2VecVisionConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'Data2VecVisionConfig'
_CHECKPOINT_FOR_DOC = 'facebook/data2vec-vision-base'
_EXPECTED_OUTPUT_SHAPE = [1, 197, 768]
_IMAGE_CLASS_CHECKPOINT = 'facebook/data2vec-vision-base-ft1k'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'remote control, remote'

@dataclass
class Data2VecVisionModelOutputWithPooling(BaseModelOutputWithPooling):
    pass

def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()
    output = input.div(keep_prob) * random_tensor
    return output
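
# Note on drop_path (added comment, not in the original source): this is stochastic depth
# applied per sample - with probability drop_prob an entire residual branch is zeroed for a
# sample, and surviving samples are scaled by 1 / keep_prob so the expected output is
# unchanged. For example, with drop_prob = 0.5 in training mode, each row of
# drop_path(torch.ones(4, 3), 0.5, training=True) is either all 0.0 or all 2.0.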

class Data2VecVisionDropPath(nn.Module):

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return 'p={}'.format(self.drop_prob)

class Data2VecVisionEmbeddings(nn.Module):

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        if config.use_mask_token:
            self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        else:
            self.mask_token = None
        self.patch_embeddings = Data2VecVisionPatchEmbeddings(config)
        self.patch_size = config.patch_size
        self.image_size = config.image_size if isinstance(config.image_size, collections.abc.Iterable) else (config.image_size, config.image_size)
        num_patches = self.patch_embeddings.num_patches
        if config.use_absolute_position_embeddings:
            self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
        else:
            self.position_embeddings = None
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1
        if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
            return self.position_embeddings
        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]
        dim = embeddings.shape[-1]
        new_height = height // self.patch_size
        new_width = width // self.patch_size
        sqrt_num_positions = torch_int(num_positions ** 0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
        patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
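    # Editorial note: `interpolate_pos_encoding` lets the model run on resolutions other than
    # the pre-training one. The learned patch position embeddings (a sqrt(num_positions) x
    # sqrt(num_positions) grid) are bicubically resized to the new (height // patch_size,
    # width // patch_size) grid and re-joined with the class-token embedding. Assuming the
    # base configuration (224x224 images, 16x16 patches, hidden size 768), a 384x384 input
    # turns the 1 + 14*14 = 197 stored positions into 1 + 24*24 = 577 interpolated ones,
    # i.e. a (1, 577, 768) tensor.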

    def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> torch.Tensor:
        (_, _, height, width) = pixel_values.shape
        (embeddings, (patch_height, patch_width)) = self.patch_embeddings(pixel_values, self.position_embeddings[:, 1:, :] if self.position_embeddings is not None else None)
        (batch_size, seq_len, _) = embeddings.size()
        if bool_masked_pos is not None:
            mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
            w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1 - w) + mask_tokens * w
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        if self.position_embeddings is not None:
            if interpolate_pos_encoding:
                cls_tokens = cls_tokens + self.interpolate_pos_encoding(embeddings, height, width)
            else:
                cls_tokens = cls_tokens + self.position_embeddings[:, :1, :]
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)
        embeddings = self.dropout(embeddings)
        return (embeddings, (patch_height, patch_width))

class Data2VecVisionPatchEmbeddings(nn.Module):

    def __init__(self, config):
        super().__init__()
        (image_size, patch_size) = (config.image_size, config.patch_size)
        (num_channels, hidden_size) = (config.num_channels, config.hidden_size)
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
        patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.patch_shape = patch_shape
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor, position_embedding: Optional[torch.Tensor]=None) -> torch.Tensor:
        (batch_size, num_channels, height, width) = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError('Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
        embeddings = self.projection(pixel_values)
        (patch_height, patch_width) = (embeddings.shape[2], embeddings.shape[3])
        if position_embedding is not None:
            position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1).permute(0, 3, 1, 2)
            position_embedding = nn.functional.interpolate(position_embedding, size=(patch_height, patch_width), mode='bicubic')
            embeddings = embeddings + position_embedding
        embeddings = embeddings.flatten(2).transpose(1, 2)
        return (embeddings, (patch_height, patch_width))
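# Editorial note: the patch projection is a Conv2d whose kernel size equals its stride, so the
# image is cut into non-overlapping patches and each patch is linearly embedded. A sketch,
# assuming `config = Data2VecVisionConfig()` with the default 224x224 image size, 16x16
# patches and hidden size 768:
#
#     >>> pixel_values = torch.randn(1, 3, 224, 224)
#     >>> embeddings, (patch_height, patch_width) = Data2VecVisionPatchEmbeddings(config)(pixel_values)
#     >>> embeddings.shape, (patch_height, patch_width)
#     (torch.Size([1, 196, 768]), (14, 14))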

class Data2VecVisionSelfAttention(nn.Module):

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None) -> None:
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
            raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        if window_size:
            self.relative_position_bias = Data2VecVisionRelativePositionBias(config, window_size=window_size)
        else:
            self.relative_position_bias = None

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional['Data2VecVisionRelativePositionBias']=None, interpolate_pos_encoding: bool=False, resolution: Optional[Tuple[int]]=None) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if self.relative_position_bias is not None:
            (height, width) = resolution
            window_size = (height // self.config.patch_size, width // self.config.patch_size)
            attention_scores = attention_scores + self.relative_position_bias(window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1])
        if relative_position_bias is not None:
            attention_scores = attention_scores + relative_position_bias
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
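# Editorial note: this is standard scaled dot-product attention,
#     softmax(Q @ K.transpose(-1, -2) / sqrt(attention_head_size) + bias) @ V,
# where `bias` is a relative position bias added either from this layer's own table (when
# `window_size` was given at construction) or from a shared table passed in through the
# `relative_position_bias` argument, or both.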

class Data2VecVisionSelfOutput(nn.Module):

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states

class Data2VecVisionAttention(nn.Module):

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None) -> None:
        super().__init__()
        self.attention = Data2VecVisionSelfAttention(config, window_size=window_size)
        self.output = Data2VecVisionSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional['Data2VecVisionRelativePositionBias']=None, interpolate_pos_encoding: bool=False, resolution: Optional[Tuple[int]]=None) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions, relative_position_bias, interpolate_pos_encoding, resolution)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]
        return outputs

class Data2VecVisionIntermediate(nn.Module):

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

class Data2VecVisionOutput(nn.Module):

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states

class Data2VecVisionLayer(nn.Module):

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None, drop_path_rate: float=0.0) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Data2VecVisionAttention(config, window_size=window_size)
        self.intermediate = Data2VecVisionIntermediate(config)
        self.output = Data2VecVisionOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.drop_path = Data2VecVisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        init_values = config.layer_scale_init_value
        if init_values > 0:
            self.lambda_1 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
            self.lambda_2 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
        else:
            (self.lambda_1, self.lambda_2) = (None, None)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, relative_position_bias: Optional['Data2VecVisionRelativePositionBias']=None, interpolate_pos_encoding: bool=False, resolution: Optional[Tuple[int]]=None) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        self_attention_outputs = self.attention(self.layernorm_before(hidden_states), head_mask, output_attentions=output_attentions, relative_position_bias=relative_position_bias, interpolate_pos_encoding=interpolate_pos_encoding, resolution=resolution)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]
        if self.lambda_1 is not None:
            attention_output = self.lambda_1 * attention_output
        hidden_states = self.drop_path(attention_output) + hidden_states
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
        layer_output = self.output(layer_output)
        if self.lambda_2 is not None:
            layer_output = self.lambda_2 * layer_output
        layer_output = self.drop_path(layer_output) + hidden_states
        outputs = (layer_output,) + outputs
        return outputs

class Data2VecVisionRelativePositionBias(nn.Module):

    def __init__(self, config: Data2VecVisionConfig, window_size: tuple) -> None:
        super().__init__()
        self.window_size = window_size
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(torch.zeros(self.num_relative_distance, config.num_attention_heads))
        self.relative_position_indices = {}

    def generate_relative_position_index(self, window_size: Tuple[int, int]) -> torch.Tensor:
        num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        window_area = window_size[0] * window_size[1]
        grid = torch.meshgrid(torch.arange(window_size[0]), torch.arange(window_size[1]), indexing='ij')
        coords = torch.stack(grid)
        coords_flatten = torch.flatten(coords, 1)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        relative_coords[:, :, 0] += window_size[0] - 1
        relative_coords[:, :, 1] += window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
        relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype)
        relative_position_index[1:, 1:] = relative_coords.sum(-1)
        relative_position_index[0, 0:] = num_relative_distance - 3
        relative_position_index[0:, 0] = num_relative_distance - 2
        relative_position_index[0, 0] = num_relative_distance - 1
        return relative_position_index
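    # Editorial note: for an H x W window there are (2H - 1) * (2W - 1) possible relative
    # (dy, dx) offsets between two patch tokens; three extra table rows encode cls->patch,
    # patch->cls and cls->cls interactions. The returned index has shape (H*W + 1, H*W + 1)
    # and maps every token pair to a row of `relative_position_bias_table`. For the base
    # configuration's 14x14 window that is 27 * 27 + 3 = 732 rows and a (197, 197) index.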

    def forward(self, window_size, interpolate_pos_encoding: bool=False, dim_size=None) -> torch.Tensor:
        old_height = 2 * self.window_size[0] - 1
        old_width = 2 * self.window_size[1] - 1
        new_height = 2 * window_size[0] - 1
        new_width = 2 * window_size[1] - 1
        old_relative_position_bias_table = self.relative_position_bias_table
        old_num_relative_distance = self.num_relative_distance
        new_num_relative_distance = new_height * new_width + 3
        old_sub_table = old_relative_position_bias_table[:old_num_relative_distance - 3]
        old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2)
        new_sub_table = nn.functional.interpolate(old_sub_table, size=(torch_int(new_height), torch_int(new_width)), mode='bilinear')
        new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1)
        new_relative_position_bias_table = torch.cat([new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3:]])
        key = window_size
        if key not in self.relative_position_indices.keys():
            self.relative_position_indices[key] = self.generate_relative_position_index(window_size)
        relative_position_bias = new_relative_position_bias_table[self.relative_position_indices[key].view(-1)]
        relative_position_bias = relative_position_bias.view(window_size[0] * window_size[1] + 1, window_size[0] * window_size[1] + 1, -1)
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        if interpolate_pos_encoding:
            relative_position_bias = nn.functional.interpolate(relative_position_bias.unsqueeze(1), size=(dim_size, dim_size), mode='bilinear', align_corners=False).squeeze(1)
        return relative_position_bias.unsqueeze(0)

class Data2VecVisionEncoder(nn.Module):

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None) -> None:
        super().__init__()
        self.config = config
        if config.use_shared_relative_position_bias:
            self.relative_position_bias = Data2VecVisionRelativePositionBias(config, window_size=window_size)
        else:
            self.relative_position_bias = None
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layer = nn.ModuleList([Data2VecVisionLayer(config, window_size=window_size if config.use_relative_position_bias else None, drop_path_rate=dpr[i]) for i in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, interpolate_pos_encoding: bool=False, resolution: Optional[Tuple[int]]=None, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for (i, layer_module) in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, layer_head_mask, output_attentions)
            else:
                (height, width) = resolution
                window_size = (height // self.config.patch_size, width // self.config.patch_size)
                relative_position_bias = self.relative_position_bias(window_size, interpolate_pos_encoding=interpolate_pos_encoding, dim_size=hidden_states.shape[1]) if self.relative_position_bias is not None else None
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias, interpolate_pos_encoding, resolution)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
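# Editorial note: the encoder supports two relative-position-bias layouts. With
# `config.use_shared_relative_position_bias`, a single table (self.relative_position_bias) is
# evaluated once per layer call and handed to every layer; with
# `config.use_relative_position_bias`, each Data2VecVisionLayer owns its own table inside its
# self-attention module. Drop-path rates increase linearly from 0 to `config.drop_path_rate`
# across the layers.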

class Data2VecVisionPreTrainedModel(PreTrainedModel):
    config_class = Data2VecVisionConfig
    base_model_prefix = 'data2vec_vision'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    _no_split_modules = ['Data2VecVisionLayer']
    _keys_to_ignore_on_load_unexpected = ['.*relative_position_index.*']

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
DATA2VEC_VISION_START_DOCSTRING = '\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`Data2VecVisionConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
DATA2VEC_VISION_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`BeitImageProcessor.__call__`] for details.\n\n        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n            Whether to interpolate the pre-trained position encodings.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'

@add_start_docstrings('The bare Data2VecVision Model transformer outputting raw hidden-states without any specific head on top.', DATA2VEC_VISION_START_DOCSTRING)
class Data2VecVisionModel(Data2VecVisionPreTrainedModel):

    def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool=False) -> None:
        super().__init__(config)
        self.config = config
        self.embeddings = Data2VecVisionEmbeddings(config)
        self.encoder = Data2VecVisionEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
        self.layernorm = nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = Data2VecVisionPooler(config) if add_pooling_layer else None
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        for (layer, heads) in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=Data2VecVisionModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, Data2VecVisionModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        (embedding_output, _) = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding)
        resolution = pixel_values.shape[2:]
        encoder_outputs = self.encoder(embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, resolution=resolution, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding)
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
            return head_outputs + encoder_outputs[1:]
        return Data2VecVisionModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)

class Data2VecVisionPooler(nn.Module):

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        if self.layernorm is not None:
            patch_tokens = hidden_states[:, 1:, :]
            pooled_output = self.layernorm(patch_tokens.mean(1))
        else:
            pooled_output = hidden_states[:, 0]
        return pooled_output
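# Editorial note: with `config.use_mean_pooling` the pooled representation is the
# layer-normalized mean of the patch tokens (the CLS token is excluded); otherwise it is the
# raw CLS token. Either way a (batch_size, seq_len, hidden_size) input yields a
# (batch_size, hidden_size) output.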

@add_start_docstrings('\n    Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of\n    the final hidden states of the patch tokens) e.g. for ImageNet.\n    ', DATA2VEC_VISION_START_DOCSTRING)
class Data2VecVisionForImageClassification(Data2VecVisionPreTrainedModel):

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.data2vec_vision = Data2VecVisionModel(config, add_pooling_layer=True)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.post_init()

    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.data2vec_vision(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
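# Editorial usage sketch, mirroring the library's doc examples (`image` is assumed to be a
# PIL.Image loaded by the caller):
#
#     >>> from transformers import AutoImageProcessor, Data2VecVisionForImageClassification
#     >>> processor = AutoImageProcessor.from_pretrained("facebook/data2vec-vision-base-ft1k")
#     >>> model = Data2VecVisionForImageClassification.from_pretrained("facebook/data2vec-vision-base-ft1k")
#     >>> inputs = processor(images=image, return_tensors="pt")
#     >>> logits = model(**inputs).logits
#     >>> model.config.id2label[logits.argmax(-1).item()]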

class Data2VecVisionConvModule(nn.Module):

    def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], padding: Union[int, Tuple[int, int], str]=0, bias: bool=False, dilation: Union[int, Tuple[int, int]]=1) -> None:
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation)
        self.bn = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.bn(output)
        output = self.activation(output)
        return output

class Data2VecVisionPyramidPoolingBlock(nn.Module):

    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [nn.AdaptiveAvgPool2d(pool_scale), Data2VecVisionConvModule(in_channels, channels, kernel_size=1)]
        for (i, layer) in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state

class Data2VecVisionPyramidPoolingModule(nn.Module):

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for (i, pool_scale) in enumerate(pool_scales):
            block = Data2VecVisionPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(ppm_out, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners)
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
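# Editorial note: this is the pyramid pooling module from PSPNet. Each block adaptively
# average-pools the feature map to a pool_scale x pool_scale grid, projects it with a
# 1x1 conv + BatchNorm + ReLU, and bilinearly upsamples the result back to the input's spatial
# size; the outputs are then concatenated with the original features in `psp_forward` of the
# UperHead below.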

class Data2VecVisionUperHead(nn.Module):

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.pool_scales = config.pool_scales
        self.in_channels = [config.hidden_size] * 4
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        self.psp_modules = Data2VecVisionPyramidPoolingModule(self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners)
        self.bottleneck = Data2VecVisionConvModule(self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1)
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:
            l_conv = Data2VecVisionConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = Data2VecVisionConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = Data2VecVisionConvModule(len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1)

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        laterals = [lateral_conv(encoder_hidden_states[i]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(laterals[i], size=prev_shape, mode='bilinear', align_corners=self.align_corners)
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(fpn_outs[i], size=fpn_outs[0].shape[2:], mode='bilinear', align_corners=self.align_corners)
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
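# Editorial note: `Data2VecVisionUperHead` follows UPerNet. The coarsest feature level goes
# through the pyramid pooling module, each lateral 1x1 conv output is refined top-down by
# adding the bilinearly upsampled coarser level, 3x3 FPN convs smooth every level, all levels
# are upsampled to the finest resolution, concatenated, fused by `fpn_bottleneck`, and mapped
# to `config.num_labels` channels by the 1x1 classifier.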

class Data2VecVisionFCNHead(nn.Module):

    def __init__(self, config: Data2VecVisionConfig, in_index: int=2, kernel_size: int=3, dilation: Union[int, Tuple[int, int]]=1) -> None:
        super().__init__()
        self.in_channels = config.hidden_size
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = kernel_size // 2 * dilation
        convs = []
        convs.append(Data2VecVisionConvModule(self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        for i in range(self.num_convs - 1):
            convs.append(Data2VecVisionConvModule(self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = Data2VecVisionConvModule(self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2)
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output

@add_start_docstrings('\n    Data2VecVision Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.\n    ', DATA2VEC_VISION_START_DOCSTRING)
class Data2VecVisionForSemanticSegmentation(Data2VecVisionPreTrainedModel):

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.data2vec_vision = Data2VecVisionModel(config, add_pooling_layer=False)
        if len(self.config.out_indices) != 4:
            raise ValueError('Data2VecVisionForSemanticSegmentation requires config.out_indices to be a list of 4 integers, specifying which features to use from the backbone. One can use [3, 5, 7, 11] in case of a base-sized architecture.')
        self.fpn1 = nn.Sequential(nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2), nn.BatchNorm2d(config.hidden_size), nn.GELU(), nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2))
        self.fpn2 = nn.Sequential(nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2))
        self.fpn3 = nn.Identity()
        self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.decode_head = Data2VecVisionUperHead(config)
        self.auxiliary_head = Data2VecVisionFCNHead(config) if config.use_auxiliary_head else None
        self.post_init()

    def compute_loss(self, logits, auxiliary_logits, labels):
        upsampled_logits = nn.functional.interpolate(logits, size=labels.shape[-2:], mode='bilinear', align_corners=False)
        if auxiliary_logits is not None:
            upsampled_auxiliary_logits = nn.functional.interpolate(auxiliary_logits, size=labels.shape[-2:], mode='bilinear', align_corners=False)
        loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
        main_loss = loss_fct(upsampled_logits, labels)
        loss = main_loss
        if auxiliary_logits is not None:
            auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels)
            loss += self.config.auxiliary_loss_weight * auxiliary_loss
        return loss

    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        if labels is not None and self.config.num_labels == 1:
            raise ValueError('The number of labels should be greater than one')
        outputs = self.data2vec_vision(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
        encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
        features = [feature for (idx, feature) in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
        batch_size = pixel_values.shape[0]
        patch_resolution = self.config.image_size // self.config.patch_size
        features = [x[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution) for x in features]
        ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
        for i in range(len(features)):
            features[i] = ops[i](features[i])
        logits = self.decode_head(features)
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
        loss = None
        if labels is not None:
            loss = self.compute_loss(logits, auxiliary_logits, labels)
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return SemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
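# Editorial note: the returned segmentation logits live at roughly 1/4 of the input resolution
# (the backbone features sit at the patch resolution and `fpn1` upsamples the finest level by 4),
# so callers typically upsample them to the original image size, just as `compute_loss` does for
# the labels. A minimal sketch, assuming `model` and `pixel_values` already exist:
#
#     >>> logits = model(pixel_values).logits  # (batch_size, num_labels, height / 4, width / 4)
#     >>> upsampled = torch.nn.functional.interpolate(
#     ...     logits, size=pixel_values.shape[-2:], mode="bilinear", align_corners=False
#     ... )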

# File: transformers-main/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py
""""""
from __future__ import annotations
import collections.abc
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSemanticSegmenterOutput, TFSequenceClassifierOutput
from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs
from ...tf_utils import shape_list, stable_softmax
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_data2vec_vision import Data2VecVisionConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'Data2VecVisionConfig'
_CHECKPOINT_FOR_DOC = 'facebook/data2vec-vision-base'
_EXPECTED_OUTPUT_SHAPE = [1, 197, 768]
_IMAGE_CLASS_CHECKPOINT = 'facebook/data2vec-vision-base-ft1k'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'remote control, remote'

@dataclass
class TFData2VecVisionModelOutputWithPooling(TFBaseModelOutputWithPooling):
    last_hidden_state: tf.Tensor = None
    pooler_output: tf.Tensor = None
    hidden_states: Tuple[tf.Tensor] | None = None
    attentions: Tuple[tf.Tensor] | None = None

class TFData2VecVisionDropPath(keras.layers.Layer):

    def __init__(self, drop_path, **kwargs):
        super().__init__(**kwargs)
        self.drop_path = drop_path

    def call(self, x, training=None):
        if training:
            keep_prob = 1 - self.drop_path
            shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
            random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
            random_tensor = tf.floor(random_tensor)
            return x / keep_prob * random_tensor
        return x

class TFData2VecVisionEmbeddings(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.patch_embeddings = TFData2VecVisionPatchEmbeddings(config, name='patch_embeddings')
        self.num_patches = self.patch_embeddings.num_patches
        self.config = config
        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)

    def build(self, input_shape=None):
        self.cls_token = self.add_weight(shape=(1, 1, self.config.hidden_size), initializer=tf.random_normal_initializer(stddev=self.config.initializer_range), trainable=True, name='cls_token')
        if self.config.use_mask_token:
            self.mask_token = self.add_weight(shape=(1, 1, self.config.hidden_size), initializer=tf.random_normal_initializer(stddev=self.config.initializer_range), trainable=True, name='mask_token')
        else:
            self.mask_token = None
        if self.config.use_absolute_position_embeddings:
            self.position_embeddings = self.add_weight(shape=(1, self.num_patches + 1, self.config.hidden_size), initializer=tf.random_normal_initializer(stddev=self.config.initializer_range), trainable=True, name='position_embeddings')
        else:
            self.position_embeddings = None
        if self.built:
            return
        self.built = True
        if getattr(self, 'patch_embeddings', None) is not None:
            with tf.name_scope(self.patch_embeddings.name):
                self.patch_embeddings.build(None)

    def call(self, pixel_values: tf.Tensor, bool_masked_pos: tf.Tensor | None=None) -> tf.Tensor:
        embeddings = self.patch_embeddings(pixel_values)
        (batch_size, seq_len, projection_dim) = shape_list(embeddings)
        cls_tokens = tf.tile(self.cls_token, (batch_size, 1, 1))
        if bool_masked_pos is not None:
            mask_tokens = tf.broadcast_to(self.mask_token, (batch_size, seq_len, projection_dim))
            w = bool_masked_pos[..., None]
            w = tf.cast(w, mask_tokens.dtype)
            embeddings = embeddings * (1 - w) + mask_tokens * w
        embeddings = tf.concat([cls_tokens, embeddings], axis=1)
        if self.position_embeddings is not None:
            embeddings = embeddings + self.position_embeddings
        embeddings = self.dropout(embeddings)
        return embeddings

class TFData2VecVisionPatchEmbeddings(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        (image_size, patch_size) = (config.image_size, config.patch_size)
        (num_channels, hidden_size) = (config.num_channels, config.hidden_size)
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
        patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.patch_shape = patch_shape
        self.num_channels = num_channels
        self.projection = keras.layers.Conv2D(filters=hidden_size, kernel_size=patch_size, strides=patch_size, padding='valid', data_format='channels_last', kernel_initializer='glorot_uniform', bias_initializer='zeros', name='projection')

    def call(self, pixel_values: tf.Tensor, training: bool=False) -> tf.Tensor:
        (batch_size, num_channels, height, width) = shape_list(pixel_values)
        if tf.executing_eagerly():
            if num_channels != self.num_channels:
                raise ValueError('Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
            if height != self.image_size[0] or width != self.image_size[1]:
                raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).")
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        projection = self.projection(pixel_values)
        num_patches = width // self.patch_size[1] * (height // self.patch_size[0])
        return tf.reshape(tensor=projection, shape=(batch_size, num_patches, -1))

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'projection', None) is not None:
            with tf.name_scope(self.projection.name):
                self.projection.build([None, None, None, self.num_channels])

class TFData2VecVisionSelfAttention(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
        self.query = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='query')
        self.key = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='key', use_bias=False)
        self.value = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='value')
        self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
        if window_size:
            self.relative_position_bias = TFData2VecVisionRelativePositionBias(config, window_size=window_size, name='relative_position_bias')
        else:
            self.relative_position_bias = None
        self.config = config

    def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
        tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
        return tf.transpose(tensor, perm=[0, 2, 1, 3])

    def call(self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, relative_position_bias: Optional['TFData2VecVisionRelativePositionBias']=None, training: bool=False) -> Tuple[tf.Tensor]:
        batch_size = shape_list(hidden_states)[0]
        mixed_query_layer = self.query(inputs=hidden_states)
        mixed_key_layer = self.key(inputs=hidden_states)
        mixed_value_layer = self.value(inputs=hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
        key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
        attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
        attention_scores = attention_scores / self.sqrt_att_head_size
        if self.relative_position_bias is not None:
            attention_scores = attention_scores + self.relative_position_bias(0.0)[None, ...]
        if relative_position_bias is not None:
            attention_scores = attention_scores + relative_position_bias
        attention_probs = stable_softmax(logits=attention_scores, axis=-1)
        attention_probs = self.dropout(inputs=attention_probs, training=training)
        if head_mask is not None:
            attention_probs = tf.multiply(attention_probs, head_mask)
        attention_output = tf.matmul(attention_probs, value_layer)
        attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
        attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
        outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'query', None) is not None:
            with tf.name_scope(self.query.name):
                self.query.build([None, None, self.config.hidden_size])
        if getattr(self, 'key', None) is not None:
            with tf.name_scope(self.key.name):
                self.key.build([None, None, self.config.hidden_size])
        if getattr(self, 'value', None) is not None:
            with tf.name_scope(self.value.name):
                self.value.build([None, None, self.config.hidden_size])
        if getattr(self, 'relative_position_bias', None) is not None:
            with tf.name_scope(self.relative_position_bias.name):
                self.relative_position_bias.build(None)

class TFData2VecVisionSelfOutput(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.config = config

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, gamma=None, training: bool=False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])

class TFData2VecVisionAttention(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFData2VecVisionSelfAttention(config, window_size=window_size, name='attention')
        self.dense_output = TFData2VecVisionSelfOutput(config, name='output')

    def prune_heads(self, heads):
        raise NotImplementedError

    def call(self, input_tensor: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, relative_position_bias: Optional['TFData2VecVisionRelativePositionBias']=None, training: bool=False) -> Tuple[tf.Tensor]:
        self_outputs = self.attention(hidden_states=input_tensor, head_mask=head_mask, output_attentions=output_attentions, relative_position_bias=relative_position_bias, training=training)
        attention_output = self.dense_output(hidden_states=self_outputs[0], input_tensor=input_tensor, training=training)
        outputs = (attention_output,) + self_outputs[1:]
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'attention', None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, 'dense_output', None) is not None:
            with tf.name_scope(self.dense_output.name):
                self.dense_output.build(None)

class TFData2VecVisionIntermediate(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])

class TFData2VecVisionOutput(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.config = config

    def call(self, hidden_states: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.intermediate_size])

class TFData2VecVisionLayer(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None, drop_path_rate: float=0.0, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.attention = TFData2VecVisionAttention(config, window_size=window_size, name='attention')
        self.intermediate = TFData2VecVisionIntermediate(config, name='intermediate')
        self.data2vec_output = TFData2VecVisionOutput(config, name='output')
        self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm_before')
        self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm_after')
        self.drop_path = TFData2VecVisionDropPath(drop_path_rate, name='drop_path') if drop_path_rate > 0.0 else keras.layers.Activation('linear', name='drop_path')
        self.init_values = config.layer_scale_init_value

    def build(self, input_shape: tf.TensorShape=None):
        if self.init_values > 0:
            self.lambda_1 = self.add_weight(shape=self.config.hidden_size, initializer='ones', trainable=True, name='lambda_1')
            self.lambda_2 = self.add_weight(shape=self.config.hidden_size, initializer='ones', trainable=True, name='lambda_2')
            self.lambda_1.assign(self.init_values * tf.ones(self.config.hidden_size))
            self.lambda_2.assign(self.init_values * tf.ones(self.config.hidden_size))
        else:
            (self.lambda_1, self.lambda_2) = (None, None)
        if self.built:
            return
        self.built = True
        if getattr(self, 'attention', None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, 'intermediate', None) is not None:
            with tf.name_scope(self.intermediate.name):
                self.intermediate.build(None)
        if getattr(self, 'data2vec_output', None) is not None:
            with tf.name_scope(self.data2vec_output.name):
                self.data2vec_output.build(None)
        if getattr(self, 'layernorm_before', None) is not None:
            with tf.name_scope(self.layernorm_before.name):
                self.layernorm_before.build([None, None, self.config.hidden_size])
        if getattr(self, 'layernorm_after', None) is not None:
            with tf.name_scope(self.layernorm_after.name):
                self.layernorm_after.build([None, None, self.config.hidden_size])
        if getattr(self, 'drop_path', None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)

    def call(self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, relative_position_bias: Optional['TFData2VecVisionRelativePositionBias']=None, training: bool=False) -> Tuple[tf.Tensor]:
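        # pre-norm block: layernorm_before feeds self-attention, LayerScale (lambda_1 /
        # lambda_2) optionally rescales each branch, and drop_path applies stochastic
        # depth before each residual addition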
        self_attention_outputs = self.attention(input_tensor=self.layernorm_before(inputs=hidden_states), head_mask=head_mask, output_attentions=output_attentions, relative_position_bias=relative_position_bias, training=training)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]
        if self.lambda_1 is not None:
            attention_output = self.lambda_1 * attention_output
        hidden_states = self.drop_path(attention_output) + hidden_states
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
        layer_output = self.data2vec_output(layer_output)
        if self.lambda_2 is not None:
            layer_output = self.lambda_2 * layer_output
        layer_output = self.drop_path(layer_output) + hidden_states
        outputs = (layer_output,) + outputs
        return outputs

class TFData2VecVisionRelativePositionBias(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, window_size: tuple, **kwargs) -> None:
        super().__init__(**kwargs)
        self.config = config
        self.window_size = window_size
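        # the +3 extra entries cover cls-to-token, token-to-cls and cls-to-cls relative positions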
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_index = self.get_position_index()

    def build(self, input_shape):
        self.relative_position_bias_table = self.add_weight(shape=(self.num_relative_distance, self.config.num_attention_heads), initializer='zeros', trainable=True, name='relative_position_bias_table')
        super().build(input_shape)

    def get_position_index(self):
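        # pair-wise relative coordinates of all tokens inside the window, shifted to be
        # non-negative and collapsed into a single index; an extra row, column and
        # corner entry are appended afterwards for the [CLS] token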
        (xx, yy) = tf.meshgrid(range(self.window_size[0]), range(self.window_size[1]))
        coords = tf.stack([yy, xx], axis=0)
        coords_flatten = tf.reshape(coords, [2, -1])
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = tf.transpose(relative_coords, perm=[1, 2, 0])
        xx = (relative_coords[:, :, 0] + self.window_size[0] - 1) * (2 * self.window_size[1] - 1)
        yy = relative_coords[:, :, 1] + self.window_size[1] - 1
        relative_coords = tf.stack([xx, yy], axis=-1)
        relative_position_index = tf.reduce_sum(relative_coords, axis=-1)
        top = tf.ones((1, relative_position_index.shape[1]), dtype=relative_position_index.dtype) * (self.num_relative_distance - 3)
        left = tf.ones((relative_position_index.shape[0], 1), dtype=relative_position_index.dtype) * (self.num_relative_distance - 2)
        corner = tf.ones((1, 1), dtype=relative_position_index.dtype) * (self.num_relative_distance - 1)
        left_corner = tf.concat([corner, left], axis=0)
        relative_position_index = tf.concat([top, relative_position_index], axis=0)
        relative_position_index = tf.concat([left_corner, relative_position_index], axis=1)
        return relative_position_index

    def call(self, inputs=None) -> tf.Tensor:
        relative_position_bias = tf.gather(self.relative_position_bias_table, self.relative_position_index, axis=0)
        return tf.transpose(relative_position_bias, [2, 0, 1])

class TFData2VecVisionEncoder(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple]=None, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        if config.use_shared_relative_position_bias:
            self.relative_position_bias = TFData2VecVisionRelativePositionBias(config, window_size=window_size, name='relative_position_bias')
        else:
            self.relative_position_bias = None
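        # stochastic depth (drop path) decay rule: rates increase linearly from 0 to
        # config.drop_path_rate across the layers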
        dpr = list(tf.linspace(0.0, config.drop_path_rate, config.num_hidden_layers))
        self.layer = [TFData2VecVisionLayer(config, window_size=window_size if config.use_relative_position_bias else None, drop_path_rate=dpr[i], name=f'layer_._{i}') for i in range(config.num_hidden_layers)]

    def call(self, hidden_states: tf.Tensor, head_mask: tf.Tensor | None=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, TFBaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for (i, layer_module) in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            relative_position_bias = self.relative_position_bias(0.0) if self.relative_position_bias is not None else None
            layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'relative_position_bias', None) is not None:
            with tf.name_scope(self.relative_position_bias.name):
                self.relative_position_bias.build(None)
        if getattr(self, 'layer', None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)

@keras_serializable
class TFData2VecVisionMainLayer(keras.layers.Layer):
    config_class = Data2VecVisionConfig

    def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool=True, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.add_pooling_layer = add_pooling_layer
        self.embeddings = TFData2VecVisionEmbeddings(config, name='embeddings')
        self.encoder = TFData2VecVisionEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape, name='encoder')
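        # when mean pooling is used, the final layernorm is applied inside the pooler,
        # so the main layer's layernorm becomes the identity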
        self.layernorm = tf.identity if config.use_mean_pooling else keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm')
        self.pooler = TFData2VecVisionPooler(config, name='pooler') if add_pooling_layer else None

    def get_input_embeddings(self) -> keras.layers.Layer:
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @unpack_inputs
    def call(self, pixel_values: tf.Tensor | None=None, bool_masked_pos: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[tuple, TFData2VecVisionModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.config.num_hidden_layers
        embedding_output = self.embeddings(pixel_values, bool_masked_pos, training=training)
        encoder_outputs = self.encoder(embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
            return head_outputs + encoder_outputs[1:]
        return TFData2VecVisionModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'embeddings', None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, 'encoder', None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, 'layernorm', None) is not None:
            if hasattr(self.layernorm, 'name'):
                with tf.name_scope(self.layernorm.name):
                    self.layernorm.build((None, self.config.hidden_size))
        if getattr(self, 'pooler', None) is not None:
            with tf.name_scope(self.pooler.name):
                self.pooler.build(None)

class TFData2VecVisionPooler(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, **kwargs):
        super().__init__(**kwargs)
        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm') if config.use_mean_pooling else None
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        if self.layernorm is not None:
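            # mean-pool the final hidden states of the patch tokens (excluding [CLS]),
            # then normalize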
            patch_tokens = hidden_states[:, 1:, :]
            pooled_output = self.layernorm(tf.reduce_mean(patch_tokens, axis=1))
        else:
            pooled_output = hidden_states[:, 0]
        return pooled_output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'layernorm', None) is not None:
            if hasattr(self.layernorm, 'name'):
                with tf.name_scope(self.layernorm.name):
                    self.layernorm.build((None, self.config.hidden_size))

class TFData2VecVisionPreTrainedModel(TFPreTrainedModel):
    config_class = Data2VecVisionConfig
    base_model_prefix = 'data2vec_vision'
    main_input_name = 'pixel_values'
    _keys_to_ignore_on_load_unexpected = ['relative_position_index']
DATA2VEC_VISION_START_DOCSTRING = '\n    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.).\n\n    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n    behavior.\n\n    \n\n    TensorFlow models and layers in `transformers` accept two formats as input:\n\n    - having all inputs as keyword arguments (like PyTorch models), or\n    - having all inputs as a list, tuple or dict in the first positional argument.\n\n    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n    positional argument:\n\n    - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`\n    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n    `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`\n    - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n    `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`\n\n    Note that when creating models and layers with\n    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n    about any of this, as you can just pass inputs like you would to any other Python function!\n\n    \n\n    Args:\n        config ([`Data2VecVisionConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
DATA2VEC_VISION_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`BeitImageProcessor.__call__`] for details.\n\n        head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used\n            in eager mode, in graph mode the value will always be set to True.\n\n        training (`bool`, *optional*, defaults to `False``):\n            Whether or not to use the model in training mode (some modules like dropout modules have different\n            behaviors between training and evaluation).\n'

@add_start_docstrings('The bare Data2VecVision Model transformer outputting raw hidden-states without any specific head on top.', DATA2VEC_VISION_START_DOCSTRING)
class TFData2VecVisionModel(TFData2VecVisionPreTrainedModel):

    def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool=False, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.config = config
        self.data2vec_vision = TFData2VecVisionMainLayer(config, add_pooling_layer=add_pooling_layer, name='data2vec_vision')

    def get_input_embeddings(self):
        return self.data2vec_vision.get_input_embeddings()

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFData2VecVisionModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def call(self, pixel_values: TFModelInputType | None=None, bool_masked_pos: tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[tuple, TFData2VecVisionModelOutputWithPooling]:
        outputs = self.data2vec_vision(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'data2vec_vision', None) is not None:
            with tf.name_scope(self.data2vec_vision.name):
                self.data2vec_vision.build(None)

@add_start_docstrings('\n    Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of\n    the final hidden states of the patch tokens) e.g. for ImageNet.\n    ', DATA2VEC_VISION_START_DOCSTRING)
class TFData2VecVisionForImageClassification(TFData2VecVisionPreTrainedModel, TFSequenceClassificationLoss):

    def __init__(self, config: Data2VecVisionConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.data2vec_vision = TFData2VecVisionMainLayer(config, add_pooling_layer=True, name='data2vec_vision')
        self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def call(self, pixel_values: TFModelInputType | None=None, head_mask: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, tuple]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.data2vec_vision(pixel_values=pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'data2vec_vision', None) is not None:
            with tf.name_scope(self.data2vec_vision.name):
                self.data2vec_vision.build(None)
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.hidden_size])

class TFData2VecVisionConvModule(keras.layers.Layer):

    def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], padding: str='valid', bias: bool=False, dilation: Union[int, Tuple[int, int]]=1, **kwargs) -> None:
        super().__init__(**kwargs)
        self.conv = keras.layers.Conv2D(filters=out_channels, kernel_size=kernel_size, padding=padding, use_bias=bias, dilation_rate=dilation, name='conv')
        self.bn = keras.layers.BatchNormalization(name='bn', momentum=0.9, epsilon=1e-05)
        self.activation = tf.nn.relu
        self.in_channels = in_channels
        self.out_channels = out_channels

    def call(self, input: tf.Tensor) -> tf.Tensor:
        output = self.conv(input)
        output = self.bn(output)
        output = self.activation(output)
        return output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'conv', None) is not None:
            with tf.name_scope(self.conv.name):
                self.conv.build([None, None, None, self.in_channels])
        if getattr(self, 'bn', None) is not None:
            with tf.name_scope(self.bn.name):
                self.bn.build((None, None, None, self.out_channels))

class TFAdaptiveAvgPool2D(keras.layers.Layer):

    def __init__(self, output_dims: Tuple[int, int], input_ordering: str='NHWC', **kwargs):
        super().__init__(**kwargs)
        self.output_dims = output_dims
        self.input_ordering = input_ordering
        if input_ordering not in ('NCHW', 'NHWC'):
            raise ValueError("Unrecognized input_ordering, should be 'NCHW' or 'NHWC'!")
        self.h_axis = input_ordering.index('H')
        self.w_axis = input_ordering.index('W')

    def pseudo_1d_pool(self, inputs: tf.Tensor, h_pooling: bool):
        if h_pooling:
            axis = self.h_axis
            output_dim = self.output_dims[0]
        else:
            axis = self.w_axis
            output_dim = self.output_dims[1]
        input_dim = inputs.shape[axis]
        small_window = math.ceil(input_dim / output_dim)
        big_window = small_window + 1
        if h_pooling:
            output_dim = self.output_dims[0]
            small_window_shape = (small_window, 1)
            big_window_shape = (big_window, 1)
        else:
            output_dim = self.output_dims[1]
            small_window_shape = (1, small_window)
            big_window_shape = (1, big_window)
        if output_dim == input_dim:
            return inputs
        elif output_dim == 1:
            return tf.reduce_mean(inputs, axis=axis, keepdims=True)
        elif input_dim % output_dim == 0:
            return tf.nn.avg_pool2d(inputs, ksize=small_window_shape, strides=small_window_shape, padding='VALID', data_format=self.input_ordering)
        elif output_dim > input_dim and output_dim % input_dim == 0:
            return tf.repeat(inputs, repeats=output_dim // input_dim, axis=axis)
        if output_dim < input_dim:
            small_pool = tf.nn.avg_pool2d(inputs, ksize=small_window_shape, strides=1, padding='VALID', data_format=self.input_ordering)
            big_pool = tf.nn.avg_pool2d(inputs, ksize=big_window_shape, strides=1, padding='VALID', data_format=self.input_ordering)
            both_pool = tf.concat([small_pool, big_pool], axis=axis)
        else:
            small_pool = inputs
            big_pool = tf.nn.avg_pool2d(inputs, ksize=big_window_shape, strides=1, padding='VALID', data_format=self.input_ordering)
            both_pool = tf.concat([small_pool, big_pool], axis=axis)
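        # for each output position, check whether its adaptive window matches the small
        # or the big fixed window, then gather the corresponding pooled value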
        window_starts = tf.math.floor(tf.range(output_dim, dtype=tf.float32) * input_dim / output_dim)
        window_starts = tf.cast(window_starts, tf.int64)
        window_ends = tf.math.ceil(tf.range(1, output_dim + 1, dtype=tf.float32) * input_dim / output_dim)
        window_ends = tf.cast(window_ends, tf.int64)
        pool_selector = tf.cast(window_ends - window_starts - small_window, tf.bool)
        small_indices = window_starts
        big_indices = window_starts + small_pool.shape[axis]
        gather_indices = tf.where(pool_selector, big_indices, small_indices)
        return tf.gather(both_pool, gather_indices, axis=axis)

    def call(self, inputs: tf.Tensor):
        if self.input_ordering == 'NHWC':
            input_shape = inputs.shape[1:3]
        else:
            input_shape = inputs.shape[2:]
        if self.output_dims[0] == self.output_dims[1] == 1:
            if self.input_ordering == 'NHWC':
                reduce_dims = [1, 2]
            else:
                reduce_dims = [2, 3]
            return tf.reduce_mean(inputs, axis=reduce_dims, keepdims=True)
        elif input_shape[0] % self.output_dims[0] == 0 and input_shape[1] % self.output_dims[1] == 0:
            h_resize = int(input_shape[0] // self.output_dims[0])
            w_resize = int(input_shape[1] // self.output_dims[1])
            return tf.nn.avg_pool2d(inputs, ksize=(h_resize, w_resize), strides=(h_resize, w_resize), padding='VALID', data_format=self.input_ordering)
        else:
            h_pooled = self.pseudo_1d_pool(inputs, h_pooling=True)
            return self.pseudo_1d_pool(h_pooled, h_pooling=False)
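
# Illustrative sketch: TFAdaptiveAvgPool2D can be used like adaptive average pooling on
# channels-last feature maps (the shapes below are arbitrary example values):
#
#     pool = TFAdaptiveAvgPool2D(output_dims=(2, 3), input_ordering="NHWC")
#     pooled = pool(tf.random.uniform((1, 7, 11, 8)))  # -> shape (1, 2, 3, 8)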

class TFData2VecVisionPyramidPoolingModule(keras.layers.Layer):
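    # PSPNet-style pyramid pooling: pool the feature map at several scales, project each
    # pooled map with a 1x1 conv block, and bilinearly upsample everything back to the
    # input resolution so the outputs can be concatenated with the original features.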

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, out_channels: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.pool_scales = pool_scales
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.layer_list = []
        for (idx, pool_scale) in enumerate(pool_scales):
            pool_scale = pool_scale if isinstance(pool_scale, collections.abc.Iterable) else (pool_scale, pool_scale)
            self.layer_list.append([TFAdaptiveAvgPool2D(output_dims=pool_scale), TFData2VecVisionConvModule(in_channels=in_channels, out_channels=self.out_channels, kernel_size=1, name=f'{idx}.1')])

    def call(self, x: tf.Tensor) -> List[tf.Tensor]:
        ppm_outs = []
        inputs = x
        for ppm in self.layer_list:
            for layer_module in ppm:
                ppm_out = layer_module(x)
                x = ppm_out
            upsampled_ppm_out = tf.image.resize(ppm_out, size=shape_list(inputs)[1:-1], method='bilinear')
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs

    def build(self, input_shape=None):
        for layer in self.layer_list:
            for layer_module in layer:
                with tf.name_scope(layer_module.name):
                    layer_module.build(None)

class TFData2VecVisionUperHead(keras.layers.Layer):

    def __init__(self, config: Data2VecVisionConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.pool_scales = config.pool_scales
        self.in_channels = [config.hidden_size] * 4
        self.channels = config.hidden_size
        self.classifier = keras.layers.Conv2D(config.num_labels, kernel_size=1, name='classifier')
        self.psp_modules = TFData2VecVisionPyramidPoolingModule(self.pool_scales, self.in_channels[-1], self.channels, name='psp_modules')
        self.bottleneck = TFData2VecVisionConvModule(self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding='same', name='bottleneck')
        self.lateral_convs = []
        self.fpn_convs = []
        for (idx, in_channels) in enumerate(self.in_channels[:-1]):
            l_conv = TFData2VecVisionConvModule(in_channels, out_channels=self.channels, kernel_size=1, name=f'lateral_convs.{idx}')
            fpn_conv = TFData2VecVisionConvModule(in_channels=self.channels, out_channels=self.channels, kernel_size=3, padding='same', name=f'fpn_convs.{idx}')
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = TFData2VecVisionConvModule(in_channels=len(self.in_channels) * self.channels, out_channels=self.channels, kernel_size=3, padding='same', name='fpn_bottleneck')

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = tf.concat(psp_outs, axis=-1)
        output = self.bottleneck(psp_outs)
        return output

    def call(self, encoder_hidden_states: tf.Tensor) -> tf.Tensor:
        laterals = [lateral_conv(encoder_hidden_states[i]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        used_backbone_levels = len(laterals)
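        # top-down path: upsample each deeper lateral to the spatial size of the level
        # below and add it in, then refine every level with its FPN conv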
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = shape_list(laterals[i - 1])[1:-1]
            laterals[i - 1] = laterals[i - 1] + tf.image.resize(laterals[i], size=prev_shape, method='bilinear')
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = tf.image.resize(fpn_outs[i], size=shape_list(fpn_outs[0])[1:-1], method='bilinear')
        fpn_outs = tf.concat(fpn_outs, axis=-1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, None, self.channels])
        if getattr(self, 'psp_modules', None) is not None:
            with tf.name_scope(self.psp_modules.name):
                self.psp_modules.build(None)
        if getattr(self, 'bottleneck', None) is not None:
            with tf.name_scope(self.bottleneck.name):
                self.bottleneck.build(None)
        if getattr(self, 'fpn_bottleneck', None) is not None:
            with tf.name_scope(self.fpn_bottleneck.name):
                self.fpn_bottleneck.build(None)
        for layer in self.lateral_convs:
            with tf.name_scope(layer.name):
                layer.build(None)
        for layer in self.fpn_convs:
            with tf.name_scope(layer.name):
                layer.build(None)

class TFData2VecVisionFCNHead(keras.layers.Layer):
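    # Auxiliary FCN head: a small stack of conv blocks applied to one intermediate
    # feature map (selected by `in_index`), used for auxiliary supervision during
    # semantic segmentation training.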

    def __init__(self, config: Data2VecVisionConfig, in_index: int=2, kernel_size: int=3, dilation: Union[int, Tuple[int, int]]=1, **kwargs) -> None:
        super().__init__(**kwargs)
        self.in_channels = config.hidden_size
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        convs = []
        convs.append(TFData2VecVisionConvModule(in_channels=self.in_channels, out_channels=self.channels, kernel_size=kernel_size, padding='same', dilation=dilation, name='convs.0'))
        for i in range(self.num_convs - 1):
            convs.append(TFData2VecVisionConvModule(in_channels=self.channels, out_channels=self.channels, kernel_size=kernel_size, padding='same', dilation=dilation, name=f'conv_module_{i + 2}'))
        if self.num_convs == 0:
            self.convs = [tf.identity]
        else:
            self.convs = convs
        if self.concat_input:
            self.conv_cat = TFData2VecVisionConvModule(self.in_channels + self.channels, out_channels=self.channels, kernel_size=kernel_size, padding='same', name='conv_cat')
        self.classifier = keras.layers.Conv2D(config.num_labels, kernel_size=1, name='classifier')

    def call(self, encoder_hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = encoder_hidden_states[self.in_index]
        output = hidden_states
        for layer_module in self.convs:
            output = layer_module(output)
        if self.concat_input:
            output = self.conv_cat(tf.concat([hidden_states, output], axis=-1))
        output = self.classifier(output)
        return output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, None, self.channels])
        if getattr(self, 'conv_cat', None) is not None:
            with tf.name_scope(self.conv_cat.name):
                self.conv_cat.build(None)

@add_start_docstrings('\n    Data2VecVision Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.\n    ', DATA2VEC_VISION_START_DOCSTRING)
class TFData2VecVisionForSemanticSegmentation(TFData2VecVisionPreTrainedModel):

    def __init__(self, config: Data2VecVisionConfig, *inputs, **kwargs) -> None:
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.data2vec_vision = TFData2VecVisionMainLayer(config, add_pooling_layer=False, name='data2vec_vision')
        self.fpn1 = [keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name='fpn1.0'), keras.layers.BatchNormalization(name='fpn1.1', momentum=0.9, epsilon=1e-05), keras.layers.Activation('gelu'), keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name='fpn1.3')]
        self.fpn2 = [keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name='fpn2.0')]
        self.fpn3 = tf.identity
        self.fpn4 = keras.layers.MaxPool2D(pool_size=2, strides=2)
        self.decode_head = TFData2VecVisionUperHead(config, name='decode_head')
        self.auxiliary_head = TFData2VecVisionFCNHead(config, name='auxiliary_head') if config.use_auxiliary_head else None

    def compute_loss(self, logits, auxiliary_logits, labels):
        if len(shape_list(labels)) > 3:
            label_interp_shape = shape_list(labels)[1:-1]
        else:
            label_interp_shape = shape_list(labels)[-2:]
        upsampled_logits = tf.image.resize(logits, size=label_interp_shape, method='bilinear')
        if auxiliary_logits is not None:
            upsampled_auxiliary_logits = tf.image.resize(auxiliary_logits, size=label_interp_shape, method='bilinear')
        loss_fct = keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')

        def masked_loss(real, pred):
            mask = tf.math.logical_not(tf.math.equal(real, self.config.semantic_loss_ignore_index))
            loss_ = loss_fct(real, pred)
            mask = tf.cast(mask, dtype=loss_.dtype)
            loss_ *= mask
            reduced_masked_loss = tf.reduce_sum(loss_) / tf.reduce_sum(mask)
            return tf.reshape(reduced_masked_loss, (1,))
        main_loss = masked_loss(labels, upsampled_logits)
        loss = main_loss
        if auxiliary_logits is not None:
            # only add the auxiliary term when the auxiliary head produced logits
            auxiliary_loss = masked_loss(labels, upsampled_auxiliary_logits)
            loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        return loss

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, pixel_values: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, labels: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TFSemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        if labels is not None and self.config.num_labels == 1:
            raise ValueError('The number of labels should be greater than one')
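        # the decode head needs the intermediate feature maps, so hidden states are
        # always requested from the backbone regardless of `output_hidden_states`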
        outputs = self.data2vec_vision(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict)
        encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
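        # only keep the feature maps listed in `config.out_indices`; `idx + 1` because
        # the hidden states also include the initial embedding output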
        features = [feature for (idx, feature) in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
        patch_resolution = self.config.image_size // self.config.patch_size

        def reshape_features(x):
            x = tf.reshape(x, (-1, patch_resolution, patch_resolution, self.config.hidden_size))
            return x
        features = [reshape_features(x[:, 1:, :]) for x in features]
        ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
        for module in ops[0]:
            features[0] = module(features[0])
        features[1] = ops[1][0](features[1])
        for i in range(len(features[2:])):
            features[i + 2] = ops[i + 2](features[i + 2])
        logits = self.decode_head(features)
        transposed_logits = tf.transpose(logits, perm=[0, 3, 1, 2])
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
        loss = None
        if labels is not None:
            loss = self.compute_loss(logits, auxiliary_logits, labels)
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TFSemanticSegmenterOutput(loss=loss, logits=transposed_logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'data2vec_vision', None) is not None:
            with tf.name_scope(self.data2vec_vision.name):
                self.data2vec_vision.build(None)
        if getattr(self, 'decode_head', None) is not None:
            with tf.name_scope(self.decode_head.name):
                self.decode_head.build(None)
        if getattr(self, 'auxiliary_head', None) is not None:
            with tf.name_scope(self.auxiliary_head.name):
                self.auxiliary_head.build(None)
        if getattr(self, 'fpn1', None) is not None:
            with tf.name_scope(self.fpn1[0].name):
                self.fpn1[0].build([None, None, None, self.config.hidden_size])
            with tf.name_scope(self.fpn1[1].name):
                self.fpn1[1].build((None, None, None, self.config.hidden_size))
            with tf.name_scope(self.fpn1[3].name):
                self.fpn1[3].build([None, None, None, self.config.hidden_size])
        if getattr(self, 'fpn2', None) is not None:
            with tf.name_scope(self.fpn2[0].name):
                self.fpn2[0].build([None, None, None, self.config.hidden_size])

# File: transformers-main/src/transformers/models/dbrx/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
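# Torch-only model: the modeling classes are registered lazily below and only imported
# when torch is available.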
_import_structure = {'configuration_dbrx': ['DbrxConfig']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_dbrx'] = ['DbrxForCausalLM', 'DbrxModel', 'DbrxPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_dbrx import DbrxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dbrx import DbrxForCausalLM, DbrxModel, DbrxPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/dbrx/configuration_dbrx.py
""""""
from typing import Any, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class DbrxAttentionConfig(PretrainedConfig):

    def __init__(self, attn_pdrop: float=0.0, clip_qkv: Optional[float]=None, kv_n_heads: int=1, rope_theta: float=10000.0, **kwargs: Any):
        super().__init__(**kwargs)
        self.attn_pdrop = attn_pdrop
        self.clip_qkv = clip_qkv
        self.kv_n_heads = kv_n_heads
        self.rope_theta = rope_theta
        for k in ['model_type']:
            if k in kwargs:
                kwargs.pop(k)
        if len(kwargs) != 0:
            raise ValueError(f'Found unknown kwargs={kwargs!r}')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs: Any) -> 'PretrainedConfig':
        cls._set_token_in_kwargs(kwargs)
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('model_type') == 'dbrx':
            config_dict = config_dict['attn_config']
        if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)

class DbrxFFNConfig(PretrainedConfig):

    def __init__(self, ffn_act_fn: dict=None, ffn_hidden_size: int=3584, moe_num_experts: int=4, moe_top_k: int=1, moe_jitter_eps: Optional[float]=None, moe_loss_weight: float=0.01, moe_normalize_expert_weights: Optional[float]=1.0, **kwargs: Any):
        super().__init__()
        if ffn_act_fn is None:
            ffn_act_fn = {'name': 'silu'}
        self.ffn_act_fn = ffn_act_fn
        self.ffn_hidden_size = ffn_hidden_size
        self.moe_num_experts = moe_num_experts
        self.moe_top_k = moe_top_k
        self.moe_jitter_eps = moe_jitter_eps
        self.moe_loss_weight = moe_loss_weight
        self.moe_normalize_expert_weights = moe_normalize_expert_weights
        for k in ['model_type']:
            if k in kwargs:
                kwargs.pop(k)
        if len(kwargs) != 0:
            raise ValueError(f'Found unknown kwargs={kwargs!r}')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs: Any) -> 'PretrainedConfig':
        cls._set_token_in_kwargs(kwargs)
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('model_type') == 'dbrx':
            config_dict = config_dict['ffn_config']
        if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)

class DbrxConfig(PretrainedConfig):
    model_type = 'dbrx'
    attribute_map = {'num_attention_heads': 'n_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'n_layers', 'max_position_embeddings': 'max_seq_len'}

    def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, max_seq_len: int=2048, vocab_size: int=32000, resid_pdrop: float=0.0, emb_pdrop: float=0.0, attn_config: Optional[DbrxAttentionConfig]=None, ffn_config: Optional[DbrxFFNConfig]=None, use_cache: bool=True, initializer_range: float=0.02, output_router_logits: bool=False, **kwargs: Any):
        if attn_config is None:
            self.attn_config = DbrxAttentionConfig()
        elif isinstance(attn_config, dict):
            self.attn_config = DbrxAttentionConfig(**attn_config)
        else:
            self.attn_config = attn_config
        if ffn_config is None:
            self.ffn_config = DbrxFFNConfig()
        elif isinstance(ffn_config, dict):
            self.ffn_config = DbrxFFNConfig(**ffn_config)
        else:
            self.ffn_config = ffn_config
        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.resid_pdrop = resid_pdrop
        self.emb_pdrop = emb_pdrop
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.output_router_logits = output_router_logits
        self.num_key_value_heads = self.attn_config.kv_n_heads
        tie_word_embeddings = kwargs.pop('tie_word_embeddings', False)
        if tie_word_embeddings:
            raise ValueError('tie_word_embeddings is not supported for DBRX models.')
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
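
# Illustrative sketch: `DbrxConfig` composes nested attention and FFN configs, which may
# be passed either as config objects or as plain dicts (the values below are arbitrary):
#
#     config = DbrxConfig(
#         d_model=1024,
#         n_heads=8,
#         n_layers=4,
#         attn_config={"kv_n_heads": 2, "clip_qkv": 8.0},
#         ffn_config={"moe_num_experts": 4, "moe_top_k": 1},
#     )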

# File: transformers-main/src/transformers/models/dbrx/modeling_dbrx.py
""""""
import math
from typing import Any, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings
from .configuration_dbrx import DbrxConfig
if is_flash_attn_2_available():
    from ...modeling_flash_attention_utils import _flash_attention_forward
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'DbrxConfig'

def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int):
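    # Build an additive 4D causal mask of shape (batch, 1, sequence_length, target_length):
    # disallowed positions hold `min_dtype`, allowed ones hold 0; a 2D padding mask is
    # folded in when provided, and an already-4D mask is passed through unchanged.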
    if attention_mask is not None and attention_mask.dim() == 4:
        causal_mask = attention_mask
    else:
        causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
        if sequence_length != 1:
            causal_mask = torch.triu(causal_mask, diagonal=1)
        causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
        causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
        if attention_mask is not None:
            causal_mask = causal_mask.clone()
            mask_length = attention_mask.shape[-1]
            padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
            padding_mask = padding_mask == 0
            causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
    return causal_mask

class DbrxRotaryEmbedding(nn.Module):

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()
        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim)
        self.register_buffer('inv_freq', tensor=inv_freq, persistent=False)

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
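        # compute the RoPE cos/sin tables for the requested positions; the outer product
        # is done in float32 with autocast disabled, then cast back to the input dtype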
        self.inv_freq.to(x.device)
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu'
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()
        return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))

def rotate_half(x):
    x1 = x[..., :x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
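    # apply rotary position embeddings to q and k: cos/sin are unsqueezed to broadcast
    # over the heads dimension and the rotation uses the half-split trick in rotate_half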
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = q * cos + rotate_half(q) * sin
    k_embed = k * cos + rotate_half(k) * sin
    return (q_embed, k_embed)

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
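    # grouped-query attention helper: repeat each key/value head n_rep times so the
    # key/value tensors match the number of query heads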
    (batch, num_key_value_heads, slen, head_dim) = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

def load_balancing_loss_func(gate_logits: torch.Tensor, num_experts: int, top_k: int, attention_mask: Optional[torch.Tensor]) -> torch.Tensor:
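    # auxiliary load-balancing loss for the MoE router: encourages both the fraction of
    # tokens routed to each expert and the mean router probability per expert to stay
    # close to uniform, optionally ignoring padded positions via the attention mask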
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return torch.tensor(0.0)
    if isinstance(gate_logits, tuple):
        compute_device = gate_logits[0].device
        concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
    routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
    (_, selected_experts) = torch.topk(routing_weights, top_k, dim=-1)
    expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
    if attention_mask is None:
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        (batch_size, sequence_length) = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
        expert_attention_mask = attention_mask[None, :, :, None, None].expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts)).reshape(-1, top_k, num_experts).to(compute_device)
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(expert_attention_mask, dim=0)
        router_per_expert_attention_mask = attention_mask[None, :, :, None].expand((num_hidden_layers, batch_size, sequence_length, num_experts)).reshape(-1, num_experts).to(compute_device)
        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(router_per_expert_attention_mask, dim=0)
    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts

class DbrxAttention(nn.Module):

    def __init__(self, config: DbrxConfig, block_idx: Optional[int]=None):
        super().__init__()
        self.config = config
        self.hidden_size = config.d_model
        self.num_heads = config.n_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = config.max_seq_len
        self.block_idx = block_idx
        if block_idx is None:
            logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `block_idx` is not recommended and will ' + 'lead to errors during the forward call if caching is used. Please make sure to provide a `block_idx` ' + 'when creating this class.')
        attn_config = config.attn_config
        self.attn_pdrop = attn_config.attn_pdrop
        self.clip_qkv = attn_config.clip_qkv
        self.num_key_value_heads = attn_config.kv_n_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.rope_theta = attn_config.rope_theta
        self.is_causal = True
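        # fused QKV projection: a single matmul yields hidden_size query features plus
        # 2 * num_key_value_heads * head_dim key/value features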
        self.Wqkv = nn.Linear(self.hidden_size, self.hidden_size + 2 * self.num_key_value_heads * self.head_dim, bias=False)
        self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.rotary_emb = DbrxRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta)

    def forward(self, hidden_states: torch.Tensor, position_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
        (bsz, q_len, _) = hidden_states.size()
        qkv_states = self.Wqkv(hidden_states)
        if self.clip_qkv is not None:
            # clamp only when a clipping value is configured; `Tensor.clamp` requires
            # at least one of `min`/`max` to be set
            qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
        (query_states, key_states, value_states) = qkv_states.split([self.hidden_size, self.num_key_value_heads * self.head_dim, self.num_key_value_heads * self.head_dim], dim=2)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        (cos, sin) = self.rotary_emb(value_states, position_ids)
        (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_value is not None:
            cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
            (key_states, value_states) = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
        if attention_mask is not None:
            causal_mask = attention_mask[:, :, :, :key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attn_pdrop, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)
        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is' + f' {attn_output.size()}')
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
        attn_output = self.out_proj(attn_output)
        if not output_attentions:
            attn_weights = None
        return (attn_output, attn_weights, past_key_value)

class DbrxFlashAttention2(DbrxAttention):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if isinstance(past_key_value, StaticCache):
            raise ValueError('`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers')
        logger.info('Implicitly setting `output_attentions` to False as it is not supported in Flash Attention.')
        output_attentions = False
        (bsz, q_len, _) = hidden_states.size()
        qkv_states = self.Wqkv(hidden_states)
        if self.clip_qkv is not None:
            qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
        (query_states, key_states, value_states) = qkv_states.split([self.hidden_size, self.num_key_value_heads * self.head_dim, self.num_key_value_heads * self.head_dim], dim=2)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        (cos, sin) = self.rotary_emb(value_states, position_ids)
        (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_value is not None:
            cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
            (key_states, value_states) = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)
        dropout_rate = self.attn_pdrop if self.training else 0.0
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            elif hasattr(self.config, '_pre_quantization_dtype'):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = query_states.dtype
            logger.warning_once('The input hidden states seems to be silently casted in float32, this might be ' + 'related to the fact you have upcasted embedding or layer norm layers in ' + f'float32. We will cast back the input in {target_dtype}.')
            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)
        attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask)
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.out_proj(attn_output)
        if not output_attentions:
            attn_weights = None
        return (attn_output, attn_weights, past_key_value)

class DbrxSdpaAttention(DbrxAttention):

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            logger.warning_once('DbrxModel is using DbrxSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.')
            return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
        (bsz, q_len, _) = hidden_states.size()
        qkv_states = self.Wqkv(hidden_states)
        if self.clip_qkv is not None:
            qkv_states = qkv_states.clamp(min=-self.clip_qkv, max=self.clip_qkv)
        (query_states, key_states, value_states) = qkv_states.split([self.hidden_size, self.num_key_value_heads * self.head_dim, self.num_key_value_heads * self.head_dim], dim=2)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        (cos, sin) = self.rotary_emb(value_states, position_ids)
        (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_value is not None:
            cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
            (key_states, value_states) = past_key_value.update(key_states, value_states, self.block_idx, cache_kwargs)
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        causal_mask = attention_mask
        if attention_mask is not None:
            causal_mask = causal_mask[:, :, :, :key_states.shape[-2]]
        if query_states.device.type == 'cuda' and causal_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()
        is_causal = True if causal_mask is None and q_len > 1 else False
        attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attn_pdrop if self.training else 0.0, is_causal=is_causal)
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, -1)
        attn_output = self.out_proj(attn_output)
        return (attn_output, None, past_key_value)
DBRX_ATTENTION_CLASSES = {'eager': DbrxAttention, 'flash_attention_2': DbrxFlashAttention2, 'sdpa': DbrxSdpaAttention}
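
# Illustrative sketch (not part of the original file): this mapping is how
# `config._attn_implementation` ('eager', 'flash_attention_2' or 'sdpa') selects the
# attention class at build time, mirroring what DbrxNormAttentionNorm does below.
def _sketch_attention_class_selection(config: DbrxConfig, block_idx: int):
    attn_cls = DBRX_ATTENTION_CLASSES[config._attn_implementation]
    return attn_cls(config=config, block_idx=block_idx)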

class DbrxNormAttentionNorm(nn.Module):

    def __init__(self, config: DbrxConfig, block_idx: Optional[int]=None):
        super().__init__()
        self.block_idx = block_idx
        self.resid_pdrop = config.resid_pdrop
        self.norm_1 = nn.LayerNorm(config.d_model, bias=False)
        self.attn = DBRX_ATTENTION_CLASSES[config._attn_implementation](config=config, block_idx=block_idx)
        self.norm_2 = nn.LayerNorm(config.d_model, bias=False)

    def forward(self, hidden_states: torch.Tensor, position_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[Cache]]:
        residual_states = hidden_states
        hidden_states = self.norm_1(hidden_states).to(hidden_states.dtype)
        (hidden_states, attn_weights, past_key_value) = self.attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs)
        hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training)
        hidden_states = hidden_states + residual_states
        residual_states = hidden_states
        hidden_states = self.norm_2(hidden_states).to(hidden_states.dtype)
        return (residual_states, hidden_states, attn_weights, past_key_value)

class DbrxRouter(nn.Module):

    def __init__(self, hidden_size: int, moe_num_experts: int, moe_top_k: int, moe_jitter_eps: Optional[float], moe_normalize_expert_weights: Optional[float]):
        super().__init__()
        self.hidden_size = hidden_size
        self.moe_num_experts = moe_num_experts
        self.moe_top_k = moe_top_k
        self.moe_jitter_eps = moe_jitter_eps
        self.moe_normalize_expert_weights = moe_normalize_expert_weights
        self.layer = nn.Linear(self.hidden_size, self.moe_num_experts, bias=False)

    def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.LongTensor]:
        if self.training and self.moe_jitter_eps is not None:
            hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.moe_jitter_eps, 1.0 + self.moe_jitter_eps)
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
        weights = self.layer(hidden_states).softmax(dim=-1, dtype=torch.float32)
        (top_weights, top_experts) = torch.topk(weights, self.moe_top_k, dim=-1)
        top_weights_scale = torch.norm(top_weights, p=self.moe_normalize_expert_weights, dim=-1, keepdim=True) if self.moe_normalize_expert_weights is not None else 1.0
        top_weights = top_weights / top_weights_scale
        weights = weights.to(hidden_states.dtype)
        top_weights = top_weights.to(hidden_states.dtype)
        return (weights, top_weights, top_experts)
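
# Illustrative sketch (not part of the original file): the router flattens the tokens,
# softmaxes the gating logits over experts, keeps the top-k probabilities and, when
# moe_normalize_expert_weights is set, rescales them by their p-norm. Toy sizes are assumptions.
def _sketch_router_topk():
    import torch
    tokens, num_experts, top_k = 6, 4, 2
    logits = torch.randn(tokens, num_experts)
    weights = logits.softmax(dim=-1, dtype=torch.float32)
    top_weights, top_experts = torch.topk(weights, top_k, dim=-1)
    # With moe_normalize_expert_weights=1.0, each row of top_weights sums to 1 after this.
    top_weights = top_weights / torch.norm(top_weights, p=1.0, dim=-1, keepdim=True)
    return top_weights, top_experts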

class DbrxExpertGLU(nn.Module):

    def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
        super().__init__()
        self.hidden_size = hidden_size
        self.ffn_hidden_size = ffn_hidden_size
        self.moe_num_experts = moe_num_experts
        self.w1 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
        self.v1 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
        self.w2 = nn.Parameter(torch.empty(moe_num_experts * ffn_hidden_size, hidden_size))
        act_fn_name = ffn_act_fn.get('name', 'silu')
        self.activation_fn = ACT2FN[act_fn_name]

    def forward(self, x: torch.Tensor, expert_w1: torch.Tensor, expert_v1: torch.Tensor, expert_w2: torch.Tensor) -> torch.Tensor:
        gate_proj = x.matmul(expert_w1.t())
        up_proj = x.matmul(expert_v1.t())
        gate_proj = self.activation_fn(gate_proj)
        intermediate_states = gate_proj * up_proj
        down_proj = intermediate_states.matmul(expert_w2)
        return down_proj
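
# Illustrative sketch (not part of the original file): per expert, the module above is a
# gated MLP, (act(x @ W1^T) * (x @ V1^T)) @ W2, where each weight slice has shape
# (ffn_hidden_size, hidden_size). Toy sizes below are assumptions; 'silu' is the default activation.
def _sketch_expert_glu():
    import torch
    tokens, hidden, ffn_hidden = 3, 8, 16
    x = torch.randn(tokens, hidden)
    w1 = torch.randn(ffn_hidden, hidden)
    v1 = torch.randn(ffn_hidden, hidden)
    w2 = torch.randn(ffn_hidden, hidden)
    gate = torch.nn.functional.silu(x @ w1.t())
    up = x @ v1.t()
    return (gate * up) @ w2  # (tokens, hidden)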

class DbrxExperts(nn.Module):

    def __init__(self, hidden_size: int, ffn_hidden_size: int, moe_num_experts: int, ffn_act_fn: dict):
        super().__init__()
        self.moe_num_experts = moe_num_experts
        self.mlp = DbrxExpertGLU(hidden_size=hidden_size, ffn_hidden_size=ffn_hidden_size, moe_num_experts=moe_num_experts, ffn_act_fn=ffn_act_fn)

    def forward(self, x: torch.Tensor, weights: torch.Tensor, top_weights: torch.Tensor, top_experts: torch.LongTensor) -> torch.Tensor:
        (bsz, q_len, hidden_size) = x.shape
        x = x.view(-1, hidden_size)
        out = torch.zeros_like(x)
        expert_mask = nn.functional.one_hot(top_experts, num_classes=self.moe_num_experts).permute(2, 1, 0)
        w1_chunked = self.mlp.w1.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(self.moe_num_experts, dim=0)
        v1_chunked = self.mlp.v1.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(self.moe_num_experts, dim=0)
        w2_chunked = self.mlp.w2.view(self.mlp.moe_num_experts, self.mlp.ffn_hidden_size, self.mlp.hidden_size).chunk(self.moe_num_experts, dim=0)
        w1_chunked = [w1.squeeze(dim=0) for w1 in w1_chunked]
        v1_chunked = [v1.squeeze(dim=0) for v1 in v1_chunked]
        w2_chunked = [w2.squeeze(dim=0) for w2 in w2_chunked]
        for expert_idx in range(0, self.moe_num_experts):
            (topk_idx, token_idx) = torch.where(expert_mask[expert_idx])
            if token_idx.shape[0] == 0:
                continue
            token_list = token_idx
            topk_list = topk_idx
            expert_tokens = x[None, token_list].reshape(-1, hidden_size)
            expert_out = self.mlp(expert_tokens, w1_chunked[expert_idx], v1_chunked[expert_idx], w2_chunked[expert_idx]) * top_weights[token_list, topk_list, None]
            out.index_add_(0, token_idx, expert_out)
        out = out.reshape(bsz, q_len, hidden_size)
        return out
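
# Illustrative sketch (not part of the original file): the dispatch loop above builds a
# (num_experts, top_k, tokens) one-hot mask from the routed indices, gathers the tokens
# assigned to each expert, and accumulates the weighted expert outputs with index_add_.
# The random routing and the identity "expert" below are assumptions to keep it short.
def _sketch_expert_dispatch():
    import torch
    tokens, num_experts, top_k, hidden = 5, 4, 2, 8
    x = torch.randn(tokens, hidden)
    top_experts = torch.randint(0, num_experts, (tokens, top_k))
    top_weights = torch.rand(tokens, top_k)
    out = torch.zeros_like(x)
    expert_mask = torch.nn.functional.one_hot(top_experts, num_classes=num_experts).permute(2, 1, 0)
    for expert_idx in range(num_experts):
        topk_idx, token_idx = torch.where(expert_mask[expert_idx])
        if token_idx.numel() == 0:
            continue
        # A real expert would apply its GLU here; identity stands in for it.
        expert_out = x[token_idx] * top_weights[token_idx, topk_idx, None]
        out.index_add_(0, token_idx, expert_out)
    return out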

class DbrxFFN(nn.Module):

    def __init__(self, config: DbrxConfig):
        super().__init__()
        ffn_config = config.ffn_config
        self.router = DbrxRouter(hidden_size=config.d_model, moe_num_experts=ffn_config.moe_num_experts, moe_top_k=ffn_config.moe_top_k, moe_jitter_eps=ffn_config.moe_jitter_eps, moe_normalize_expert_weights=ffn_config.moe_normalize_expert_weights)
        self.experts = DbrxExperts(hidden_size=config.d_model, ffn_hidden_size=ffn_config.ffn_hidden_size, moe_num_experts=ffn_config.moe_num_experts, ffn_act_fn=ffn_config.ffn_act_fn)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        (weights, top_weights, top_experts) = self.router(x)
        out = self.experts(x, weights, top_weights, top_experts)
        return (out, weights)

class DbrxBlock(nn.Module):

    def __init__(self, config: DbrxConfig, block_idx: int):
        super().__init__()
        self.hidden_size = config.d_model
        self.resid_pdrop = config.resid_pdrop
        self.block_idx = block_idx
        self.norm_attn_norm = DbrxNormAttentionNorm(config=config, block_idx=block_idx)
        self.ffn = DbrxFFN(config=config)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: torch.LongTensor=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, output_router_logits: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Any) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[Cache]], Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]], Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[Cache], Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache], Optional[torch.Tensor]]]:
        (resid_states, hidden_states, self_attn_weights, present_key_value) = self.norm_attn_norm(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs)
        (hidden_states, router_logits) = self.ffn(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.resid_pdrop, training=self.training)
        hidden_states = resid_states + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        if use_cache:
            outputs += (present_key_value,)
        if output_router_logits:
            outputs += (router_logits,)
        return outputs
DBRX_START_DOCSTRING = '\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`DbrxConfig`]):\n            Model configuration class with all the parameters of the model. Initializing with a config file does not\n            load the weights associated with the model, only the configuration. Check out the\n            [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'

@add_start_docstrings('The bare DBRX Model outputting raw hidden-states without any specific head on top.', DBRX_START_DOCSTRING)
class DbrxPreTrainedModel(PreTrainedModel):
    config_class = DbrxConfig
    base_model_prefix = 'transformer'
    supports_gradient_checkpointing = True
    _no_split_modules = ['DbrxBlock']
    _skip_keys_device_placement = ['past_key_values']
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module: nn.Module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, DbrxExpertGLU):
            module.w1.data.normal_(mean=0.0, std=std)
            module.v1.data.normal_(mean=0.0, std=std)
            module.w2.data.normal_(mean=0.0, std=std)
DBRX_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n            it.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n            `past_key_values`).\n\n            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n            information on the default strategy.\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.n_positions - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n            Two formats are allowed:\n            - a [`~cache_utils.Cache`] instance, see our\n            [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n            cache format.\n\n            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n            legacy cache format will be returned.\n\n            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n            of shape `(batch_size, sequence_length)`.\n        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        use_cache (`bool`, *optional*):\n            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n            `past_key_values`).\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        output_router_logits (`bool`, *optional*):\n            Whether or not to return the logits of all the routers. They are useful for computing the router loss, and\n            should not be returned during inference.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer\n            the complete sequence length.\n"

@add_start_docstrings('The bare DBRX Model outputting raw hidden-states without any specific head on top.', DBRX_START_DOCSTRING)
class DbrxModel(DbrxPreTrainedModel):

    def __init__(self, config: DbrxConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.emb_pdrop = config.emb_pdrop
        self.wte = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
        self.blocks = nn.ModuleList([DbrxBlock(config, block_idx) for block_idx in range(config.n_layers)])
        self.norm_f = nn.LayerNorm(config.d_model, bias=False)
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self) -> nn.Embedding:
        return self.wte

    def set_input_embeddings(self, value: nn.Embedding):
        self.wte = value

    @add_start_docstrings_to_model_forward(DBRX_INPUTS_DOCSTRING)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, MoeModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        output_router_logits = output_router_logits if output_router_logits is not None else self.config.output_router_logits
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one')
        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.')
            use_cache = False
        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        inputs_embeds = nn.functional.dropout(inputs_embeds, p=self.emb_pdrop, training=self.training)
        return_legacy_cache = False
        if use_cache and (not isinstance(past_key_values, Cache)) and (not self.training):
            return_legacy_cache = True
            past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            logger.warning_once('We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/internal/generation_utils#transformers.Cache)')
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions)
        hidden_states = inputs_embeds
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_router_logits = () if output_router_logits else None
        next_decoder_cache = None
        for block in self.blocks:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.gradient_checkpointing and self.training:
                block_outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, output_router_logits, use_cache, cache_position)
            else:
                block_outputs = block(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, output_router_logits=output_router_logits, use_cache=use_cache, cache_position=cache_position)
            hidden_states = block_outputs[0]
            if use_cache:
                next_decoder_cache = block_outputs[2 if output_attentions else 1]
            if output_attentions:
                all_self_attns += (block_outputs[1],)
            if output_router_logits:
                all_router_logits += (block_outputs[-1],)
        hidden_states = self.norm_f(hidden_states)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        next_cache = next_decoder_cache if use_cache else None
        if return_legacy_cache:
            next_cache = next_cache.to_legacy_cache()
        if not return_dict:
            return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits] if v is not None))
        return MoeModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, router_logits=all_router_logits)

    def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool):
        if self.config._attn_implementation == 'flash_attention_2':
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)
        if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions):
            if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
                return None
        (dtype, device) = (input_tensor.dtype, input_tensor.device)
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_length()
        else:
            target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
        causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
        if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions):
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
        return causal_mask
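
# Illustrative sketch (not part of the original file): outside of flash attention, the
# mask produced above is additive: allowed positions are 0 and disallowed positions are
# the most negative representable value in the compute dtype, so they vanish in the
# softmax. Toy sizes below are assumptions.
def _sketch_min_dtype_causal_mask():
    import torch
    seq_len, dtype = 4, torch.float16
    min_dtype = torch.finfo(dtype).min
    mask = torch.full((seq_len, seq_len), min_dtype, dtype=dtype)
    mask = torch.triu(mask, diagonal=1)  # zeros on and below the diagonal
    return mask[None, None, :, :]  # (batch=1, head=1, q_len, kv_len)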

@add_start_docstrings('The DBRX Model transformer for causal language modeling.', DBRX_START_DOCSTRING)
class DbrxForCausalLM(DbrxPreTrainedModel):

    def __init__(self, config: DbrxConfig):
        super().__init__(config)
        self.transformer = DbrxModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.moe_loss_weight = config.ffn_config.moe_loss_weight
        self.num_experts = config.ffn_config.moe_num_experts
        self.num_experts_per_tok = config.ffn_config.moe_top_k
        self.post_init()

    def get_input_embeddings(self) -> nn.Embedding:
        return self.transformer.get_input_embeddings()

    def set_input_embeddings(self, value: nn.Embedding):
        self.transformer.set_input_embeddings(value)

    def get_output_embeddings(self) -> nn.Linear:
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Linear):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder: DbrxModel):
        self.transformer = decoder

    def get_decoder(self) -> DbrxModel:
        return self.transformer

    @add_start_docstrings_to_model_forward(DBRX_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, num_logits_to_keep: int=0) -> Union[Tuple, MoeCausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        output_router_logits = output_router_logits if output_router_logits is not None else self.config.output_router_logits
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.transformer(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position)
        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
        loss = None
        if labels is not None:
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = nn.CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
        aux_loss = None
        if output_router_logits:
            aux_loss = load_balancing_loss_func(outputs.router_logits if return_dict else outputs[-1], self.num_experts, self.num_experts_per_tok, attention_mask)
            if labels is not None and loss is not None:
                loss += self.moe_loss_weight * aux_loss.to(loss.device)
        if not return_dict:
            output = (logits,) + outputs[1:]
            if output_router_logits:
                output = (aux_loss,) + output
            return (loss,) + output if loss is not None else output
        return MoeCausalLMOutputWithPast(loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, num_logits_to_keep=None, **kwargs):
        if past_key_values is not None:
            if inputs_embeds is not None:
                input_ids = input_ids[:, -cache_position.shape[0]:]
            elif input_ids.shape[1] != cache_position.shape[0]:
                input_ids = input_ids[:, cache_position]
        if attention_mask is not None and position_ids is None:
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1]:]
                position_ids = position_ids.clone(memory_format=torch.contiguous_format)
        if inputs_embeds is not None and cache_position[0] == 0:
            model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None}
        else:
            model_inputs = {'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None}
        if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2:
            if model_inputs['inputs_embeds'] is not None:
                (batch_size, sequence_length, _) = model_inputs['inputs_embeds'].shape
                device = model_inputs['inputs_embeds'].device
            else:
                (batch_size, sequence_length) = model_inputs['input_ids'].shape
                device = model_inputs['input_ids'].device
            dtype = self.lm_head.weight.dtype
            min_dtype = torch.finfo(dtype).min
            attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_length(), dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=batch_size)
        if num_logits_to_keep is not None:
            model_inputs['num_logits_to_keep'] = num_logits_to_keep
        model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask})
        return model_inputs
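
# Illustrative sketch (not part of the original file): a minimal generation call using
# the class above. 'databricks/dbrx-instruct' is the published DBRX checkpoint, but any
# compatible checkpoint works; loading the full model requires substantial GPU memory.
def _sketch_dbrx_generation(checkpoint: str = 'databricks/dbrx-instruct'):
    import torch
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = DbrxForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16, device_map='auto')
    inputs = tokenizer('DBRX is a mixture-of-experts model that', return_tensors='pt').to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=32)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)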

# File: transformers-main/src/transformers/models/deberta/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_deberta': ['DebertaConfig', 'DebertaOnnxConfig'], 'tokenization_deberta': ['DebertaTokenizer']}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_deberta'] = ['DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_deberta'] = ['TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_deberta import DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/deberta/configuration_deberta.py
""""""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

class DebertaConfig(PretrainedConfig):
    model_type = 'deberta'

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-07, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act='gelu', **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
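
# Illustrative sketch (not part of the original file): constructing a small config by
# hand. The sizes are assumptions for illustration; a string pos_att_type is split on
# '|' by the constructor above.
def _sketch_deberta_config():
    return DebertaConfig(
        vocab_size=50265,
        hidden_size=256,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=1024,
        relative_attention=True,
        pos_att_type='c2p|p2c',
    )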

class DebertaOnnxConfig(OnnxConfig):

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'], batch_size: int=-1, seq_length: int=-1, num_choices: int=-1, is_pair: bool=False, framework: Optional['TensorType']=None, num_channels: int=3, image_width: int=40, image_height: int=40, tokenizer: 'PreTrainedTokenizerBase'=None) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and 'token_type_ids' in dummy_inputs:
            del dummy_inputs['token_type_ids']
        return dummy_inputs

# File: transformers-main/src/transformers/models/deberta/modeling_deberta.py
""""""
from collections.abc import Sequence
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import softmax_backward_data
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_deberta import DebertaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'DebertaConfig'
_CHECKPOINT_FOR_DOC = 'microsoft/deberta-base'
_CHECKPOINT_FOR_MASKED_LM = 'lsanochkin/deberta-large-feedback'
_MASKED_LM_EXPECTED_OUTPUT = "' Paris'"
_MASKED_LM_EXPECTED_LOSS = '0.54'
_CHECKPOINT_FOR_QA = 'Palak/microsoft_deberta-large_squad'
_QA_EXPECTED_OUTPUT = "' a nice puppet'"
_QA_EXPECTED_LOSS = 0.14
_QA_TARGET_START_INDEX = 12
_QA_TARGET_END_INDEX = 14

class ContextPooler(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
        self.dropout = StableDropout(config.pooler_dropout)
        self.config = config

    def forward(self, hidden_states):
        context_token = hidden_states[:, 0]
        context_token = self.dropout(context_token)
        pooled_output = self.dense(context_token)
        pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
        return pooled_output

    @property
    def output_dim(self):
        return self.config.hidden_size

class XSoftmax(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input, mask, dim):
        ctx.dim = dim
        rmask = ~mask.to(torch.bool)
        output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
        output = torch.softmax(output, ctx.dim)
        output.masked_fill_(rmask, 0)
        ctx.save_for_backward(output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (output,) = ctx.saved_tensors
        inputGrad = softmax_backward_data(ctx, grad_output, output, ctx.dim, output)
        return (inputGrad, None, None)

    @staticmethod
    def symbolic(g, self, mask, dim):
        import torch.onnx.symbolic_helper as sym_help
        from torch.onnx.symbolic_opset9 import masked_fill, softmax
        mask_cast_value = g.op('Cast', mask, to_i=sym_help.cast_pytorch_to_onnx['Long'])
        r_mask = g.op('Cast', g.op('Sub', g.op('Constant', value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), to_i=sym_help.cast_pytorch_to_onnx['Bool'])
        output = masked_fill(g, self, r_mask, g.op('Constant', value_t=torch.tensor(torch.finfo(self.type().dtype()).min)))
        output = softmax(g, output, dim)
        return masked_fill(g, output, r_mask, g.op('Constant', value_t=torch.tensor(0, dtype=torch.bool)))
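
# Illustrative sketch (not part of the original file): XSoftmax is a masked softmax that
# fills masked positions with the dtype minimum before the softmax and zeroes them
# afterwards; the plain-PyTorch version below should produce the same forward result.
def _sketch_masked_softmax():
    import torch
    scores = torch.randn(1, 1, 4, 4)
    mask = torch.tensor([1, 1, 1, 0]).view(1, 1, 1, 4).expand_as(scores)
    rmask = ~mask.to(torch.bool)
    probs = scores.masked_fill(rmask, torch.finfo(scores.dtype).min).softmax(dim=-1)
    return probs.masked_fill(rmask, 0)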

class DropoutContext:

    def __init__(self):
        self.dropout = 0
        self.mask = None
        self.scale = 1
        self.reuse_mask = True

def get_mask(input, local_context):
    if not isinstance(local_context, DropoutContext):
        dropout = local_context
        mask = None
    else:
        dropout = local_context.dropout
        dropout *= local_context.scale
        mask = local_context.mask if local_context.reuse_mask else None
    if dropout > 0 and mask is None:
        mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)
    if isinstance(local_context, DropoutContext):
        if local_context.mask is None:
            local_context.mask = mask
    return (mask, dropout)

class XDropout(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input, local_ctx):
        (mask, dropout) = get_mask(input, local_ctx)
        ctx.scale = 1.0 / (1 - dropout)
        if dropout > 0:
            ctx.save_for_backward(mask)
            return input.masked_fill(mask, 0) * ctx.scale
        else:
            return input

    @staticmethod
    def backward(ctx, grad_output):
        if ctx.scale > 1:
            (mask,) = ctx.saved_tensors
            return (grad_output.masked_fill(mask, 0) * ctx.scale, None)
        else:
            return (grad_output, None)

    @staticmethod
    def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
        from torch.onnx import symbolic_opset12
        dropout_p = local_ctx
        if isinstance(local_ctx, DropoutContext):
            dropout_p = local_ctx.dropout
        train = True
        return symbolic_opset12.dropout(g, input, dropout_p, train)

class StableDropout(nn.Module):

    def __init__(self, drop_prob):
        super().__init__()
        self.drop_prob = drop_prob
        self.count = 0
        self.context_stack = None

    def forward(self, x):
        if self.training and self.drop_prob > 0:
            return XDropout.apply(x, self.get_context())
        return x

    def clear_context(self):
        self.count = 0
        self.context_stack = None

    def init_context(self, reuse_mask=True, scale=1):
        if self.context_stack is None:
            self.context_stack = []
        self.count = 0
        for c in self.context_stack:
            c.reuse_mask = reuse_mask
            c.scale = scale

    def get_context(self):
        if self.context_stack is not None:
            if self.count >= len(self.context_stack):
                self.context_stack.append(DropoutContext())
            ctx = self.context_stack[self.count]
            ctx.dropout = self.drop_prob
            self.count += 1
            return ctx
        else:
            return self.drop_prob

class DebertaLayerNorm(nn.Module):

    def __init__(self, size, eps=1e-12):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(size))
        self.bias = nn.Parameter(torch.zeros(size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_type = hidden_states.dtype
        hidden_states = hidden_states.float()
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon)
        hidden_states = hidden_states.to(input_type)
        y = self.weight * hidden_states + self.bias
        return y
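
# Illustrative sketch (not part of the original file): the layer norm above upcasts to
# float32 for the statistics and casts back; for float32 inputs it should match
# torch.nn.functional.layer_norm up to floating-point precision.
def _sketch_deberta_layer_norm():
    import torch
    hidden_states = torch.randn(2, 3, 8)
    ln = DebertaLayerNorm(8, eps=1e-12)
    reference = torch.nn.functional.layer_norm(hidden_states, (8,), ln.weight, ln.bias, eps=1e-12)
    return torch.allclose(ln(hidden_states), reference, atol=1e-5)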

class DebertaSelfOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

class DebertaAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.self = DisentangledSelfAttention(config)
        self.output = DebertaSelfOutput(config)
        self.config = config

    def forward(self, hidden_states, attention_mask, output_attentions=False, query_states=None, relative_pos=None, rel_embeddings=None):
        self_output = self.self(hidden_states, attention_mask, output_attentions, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings)
        if output_attentions:
            (self_output, att_matrix) = self_output
        if query_states is None:
            query_states = hidden_states
        attention_output = self.output(self_output, query_states)
        if output_attentions:
            return (attention_output, att_matrix)
        else:
            return attention_output

class DebertaIntermediate(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

class DebertaOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

class DebertaLayer(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.attention = DebertaAttention(config)
        self.intermediate = DebertaIntermediate(config)
        self.output = DebertaOutput(config)

    def forward(self, hidden_states, attention_mask, query_states=None, relative_pos=None, rel_embeddings=None, output_attentions=False):
        attention_output = self.attention(hidden_states, attention_mask, output_attentions=output_attentions, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings)
        if output_attentions:
            (attention_output, att_matrix) = attention_output
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        if output_attentions:
            return (layer_output, att_matrix)
        else:
            return layer_output

class DebertaEncoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])
        self.relative_attention = getattr(config, 'relative_attention', False)
        if self.relative_attention:
            self.max_relative_positions = getattr(config, 'max_relative_positions', -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)
        self.gradient_checkpointing = False

    def get_rel_embedding(self):
        rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        if attention_mask.dim() <= 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
        elif attention_mask.dim() == 3:
            attention_mask = attention_mask.unsqueeze(1)
        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        if self.relative_attention and relative_pos is None:
            q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
            relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device)
        return relative_pos

    def forward(self, hidden_states, attention_mask, output_hidden_states=True, output_attentions=False, query_states=None, relative_pos=None, return_dict=True):
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        if isinstance(hidden_states, Sequence):
            next_kv = hidden_states[0]
        else:
            next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        for (i, layer_module) in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                hidden_states = self._gradient_checkpointing_func(layer_module.__call__, next_kv, attention_mask, query_states, relative_pos, rel_embeddings, output_attentions)
            else:
                hidden_states = layer_module(next_kv, attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions)
            if output_attentions:
                (hidden_states, att_m) = hidden_states
            if query_states is not None:
                query_states = hidden_states
                if isinstance(hidden_states, Sequence):
                    next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
            else:
                next_kv = hidden_states
            if output_attentions:
                all_attentions = all_attentions + (att_m,)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions)

def build_relative_position(query_size, key_size, device):
    q_ids = torch.arange(query_size, dtype=torch.long, device=device)
    k_ids = torch.arange(key_size, dtype=torch.long, device=device)
    rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)
    rel_pos_ids = rel_pos_ids[:query_size, :]
    rel_pos_ids = rel_pos_ids.unsqueeze(0)
    return rel_pos_ids
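
# Illustrative sketch (not part of the original file): build_relative_position returns
# query_position - key_position for every (query, key) pair with a leading batch
# dimension of 1; for 3 queries against 3 keys the result is
#     [[ 0, -1, -2],
#      [ 1,  0, -1],
#      [ 2,  1,  0]]
def _sketch_relative_position():
    import torch
    return build_relative_position(3, 3, torch.device('cpu'))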

@torch.jit.script
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])

@torch.jit.script
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])

@torch.jit.script
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))

class DisentangledSelfAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)
        self.q_bias = nn.Parameter(torch.zeros(self.all_head_size, dtype=torch.float))
        self.v_bias = nn.Parameter(torch.zeros(self.all_head_size, dtype=torch.float))
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
        self.relative_attention = getattr(config, 'relative_attention', False)
        self.talking_head = getattr(config, 'talking_head', False)
        if self.talking_head:
            self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
            self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
        if self.relative_attention:
            self.max_relative_positions = getattr(config, 'max_relative_positions', -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.pos_dropout = StableDropout(config.hidden_dropout_prob)
            if 'c2p' in self.pos_att_type:
                self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
            if 'p2c' in self.pos_att_type:
                self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = StableDropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, output_attentions=False, query_states=None, relative_pos=None, rel_embeddings=None):
        if query_states is None:
            qp = self.in_proj(hidden_states)
            (query_layer, key_layer, value_layer) = self.transpose_for_scores(qp).chunk(3, dim=-1)
        else:

            def linear(w, b, x):
                if b is not None:
                    return torch.matmul(x, w.t()) + b.t()
                else:
                    return torch.matmul(x, w.t())
            ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)
            qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]
            qkvb = [None] * 3
            q = linear(qkvw[0], qkvb[0], query_states.to(dtype=qkvw[0].dtype))
            (k, v) = [linear(qkvw[i], qkvb[i], hidden_states.to(dtype=qkvw[i].dtype)) for i in range(1, 3)]
            (query_layer, key_layer, value_layer) = [self.transpose_for_scores(x) for x in [q, k, v]]
        query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
        value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])
        rel_att = None
        scale_factor = 1 + len(self.pos_att_type)
        scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
        query_layer = query_layer / scale.to(dtype=query_layer.dtype)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.relative_attention:
            rel_embeddings = self.pos_dropout(rel_embeddings)
            rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
        if rel_att is not None:
            attention_scores = attention_scores + rel_att
        if self.talking_head:
            attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
        attention_probs = self.dropout(attention_probs)
        if self.talking_head:
            attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (-1,)
        context_layer = context_layer.view(new_context_layer_shape)
        if output_attentions:
            return (context_layer, attention_probs)
        else:
            return context_layer

    def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        if relative_pos is None:
            q = query_layer.size(-2)
            relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device)
        if relative_pos.dim() == 2:
            relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
        elif relative_pos.dim() == 3:
            relative_pos = relative_pos.unsqueeze(1)
        elif relative_pos.dim() != 4:
            raise ValueError(f'Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}')
        att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions)
        relative_pos = relative_pos.long().to(query_layer.device)
        rel_embeddings = rel_embeddings[self.max_relative_positions - att_span:self.max_relative_positions + att_span, :].unsqueeze(0)
        score = 0
        if 'c2p' in self.pos_att_type:
            pos_key_layer = self.pos_proj(rel_embeddings)
            pos_key_layer = self.transpose_for_scores(pos_key_layer)
            c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2))
            c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
            c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos))
            score += c2p_att
        if 'p2c' in self.pos_att_type:
            pos_query_layer = self.pos_q_proj(rel_embeddings)
            pos_query_layer = self.transpose_for_scores(pos_query_layer)
            pos_query_layer /= torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor)
            if query_layer.size(-2) != key_layer.size(-2):
                r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device)
            else:
                r_pos = relative_pos
            p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
            p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2).to(dtype=key_layer.dtype))
            p2c_att = torch.gather(p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer)).transpose(-1, -2)
            if query_layer.size(-2) != key_layer.size(-2):
                pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)
                p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer))
            score += p2c_att
        return score
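# Editorial note: disentangled_att_bias above adds up to two position-dependent terms to the plain
# content-to-content scores Q_c @ K_c^T computed in forward:
#   - 'c2p' (content-to-position): Q_c @ K_r^T, gathered at clamp(delta(i, j) + att_span, 0, 2 * att_span - 1)
#   - 'p2c' (position-to-content): K_c @ Q_r^T, gathered at clamp(delta(j, i) + att_span, 0, 2 * att_span - 1)
# where K_r / Q_r are projections of the relative-position embeddings, delta is the signed distance from
# build_relative_position, and every term is scaled by 1 / sqrt(d * scale_factor) with
# scale_factor = 1 + len(pos_att_type).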

class DebertaEmbeddings(nn.Module):

    def __init__(self, config):
        super().__init__()
        pad_token_id = getattr(config, 'pad_token_id', 0)
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)
        self.position_biased_input = getattr(config, 'position_biased_input', True)
        if not self.position_biased_input:
            self.position_embeddings = None
        else:
            self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)
        if config.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
        if self.embedding_size != config.hidden_size:
            self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
        self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config
        self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if self.position_embeddings is not None:
            position_embeddings = self.position_embeddings(position_ids.long())
        else:
            position_embeddings = torch.zeros_like(inputs_embeds)
        embeddings = inputs_embeds
        if self.position_biased_input:
            embeddings += position_embeddings
        if self.config.type_vocab_size > 0:
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings += token_type_embeddings
        if self.embedding_size != self.config.hidden_size:
            embeddings = self.embed_proj(embeddings)
        embeddings = self.LayerNorm(embeddings)
        if mask is not None:
            if mask.dim() != embeddings.dim():
                if mask.dim() == 4:
                    mask = mask.squeeze(1).squeeze(1)
                mask = mask.unsqueeze(2)
            mask = mask.to(embeddings.dtype)
            embeddings = embeddings * mask
        embeddings = self.dropout(embeddings)
        return embeddings
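# Editorial note: DebertaEmbeddings sums word embeddings with (optionally) absolute position and
# token-type embeddings, projects to hidden_size when embedding_size differs, applies DebertaLayerNorm,
# zeroes out padded positions using the attention mask, and finally applies StableDropout.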

class DebertaPreTrainedModel(PreTrainedModel):
    config_class = DebertaConfig
    base_model_prefix = 'deberta'
    _keys_to_ignore_on_load_unexpected = ['position_embeddings']
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
DEBERTA_START_DOCSTRING = "\n    The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled\n    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built\n    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those\n    two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage\n    and behavior.\n\n\n    Parameters:\n        config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
DEBERTA_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`torch.LongTensor` of shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n            1]`:\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,\n            config.max_position_embeddings - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attention tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"

@add_start_docstrings('The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.', DEBERTA_START_DOCSTRING)
class DebertaModel(DebertaPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = DebertaEmbeddings(config)
        self.encoder = DebertaEncoder(config)
        self.z_steps = 0
        self.config = config
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError('The prune function is not implemented in DeBERTa model.')

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        embedding_output = self.embeddings(input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids, mask=attention_mask, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(embedding_output, attention_mask, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict)
        encoded_layers = encoder_outputs[1]
        if self.z_steps > 1:
            hidden_states = encoded_layers[-2]
            layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
            query_states = encoded_layers[-1]
            rel_embeddings = self.encoder.get_rel_embedding()
            attention_mask = self.encoder.get_attention_mask(attention_mask)
            rel_pos = self.encoder.get_rel_pos(embedding_output)
            for layer in layers[1:]:
                query_states = layer(hidden_states, attention_mask, output_attentions=False, query_states=query_states, relative_pos=rel_pos, rel_embeddings=rel_embeddings)
                encoded_layers.append(query_states)
        sequence_output = encoded_layers[-1]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1 if output_hidden_states else 2:]
        return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states if output_hidden_states else None, attentions=encoder_outputs.attentions)
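# Editorial usage sketch for DebertaModel (not part of the original file). The checkpoint name is an
# assumption used purely for illustration:
# >>> from transformers import AutoTokenizer, DebertaModel
# >>> import torch
# >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
# >>> model = DebertaModel.from_pretrained("microsoft/deberta-base")
# >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
# >>> with torch.no_grad():
# ...     outputs = model(**inputs)
# >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, config.hidden_size)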

@add_start_docstrings('DeBERTa Model with a `language modeling` head on top.', DEBERTA_START_DOCSTRING)
class DebertaForMaskedLM(DebertaPreTrainedModel):
    _tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias']

    def __init__(self, config):
        super().__init__(config)
        self.deberta = DebertaModel(config)
        self.cls = DebertaOnlyMLMHead(config)
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_MASKED_LM, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='[MASK]', expected_output=_MASKED_LM_EXPECTED_OUTPUT, expected_loss=_MASKED_LM_EXPECTED_LOSS)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MaskedLMOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[1:]
            return (masked_lm_loss,) + output if masked_lm_loss is not None else output
        return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
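# Editorial usage sketch for DebertaForMaskedLM (not part of the original file). The checkpoint name is
# an assumption; a base checkpoint may warn that the MLM head is freshly initialized:
# >>> from transformers import AutoTokenizer, DebertaForMaskedLM
# >>> import torch
# >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
# >>> model = DebertaForMaskedLM.from_pretrained("microsoft/deberta-base")
# >>> inputs = tokenizer("Paris is the [MASK] of France.", return_tensors="pt")
# >>> with torch.no_grad():
# ...     logits = model(**inputs).logits
# >>> mask_positions = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
# >>> tokenizer.decode(logits[0, mask_positions].argmax(-1))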

class DebertaPredictionHeadTransform(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.dense = nn.Linear(config.hidden_size, self.embedding_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states

class DebertaLMPredictionHead(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.transform = DebertaPredictionHeadTransform(config)
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias

    def _tie_weights(self):
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states

class DebertaOnlyMLMHead(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.predictions = DebertaLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores

@add_start_docstrings('\n    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n    pooled output) e.g. for GLUE tasks.\n    ', DEBERTA_START_DOCSTRING)
class DebertaForSequenceClassification(DebertaPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        num_labels = getattr(config, 'num_labels', 2)
        self.num_labels = num_labels
        self.deberta = DebertaModel(config)
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim
        self.classifier = nn.Linear(output_dim, num_labels)
        drop_out = getattr(config, 'cls_dropout', None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = StableDropout(drop_out)
        self.post_init()

    def get_input_embeddings(self):
        return self.deberta.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    loss_fn = nn.MSELoss()
                    logits = logits.view(-1).to(labels.dtype)
                    loss = loss_fn(logits, labels.view(-1))
                elif labels.dim() == 1 or labels.size(-1) == 1:
                    label_index = (labels >= 0).nonzero()
                    labels = labels.long()
                    if label_index.size(0) > 0:
                        labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1)))
                        labels = torch.gather(labels, 0, label_index.view(-1))
                        loss_fct = CrossEntropyLoss()
                        loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
                    else:
                        loss = torch.tensor(0).to(logits)
                else:
                    log_softmax = nn.LogSoftmax(-1)
                    loss = -(log_softmax(logits) * labels).sum(-1).mean()
            elif self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
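# Editorial usage sketch for DebertaForSequenceClassification (not part of the original file). The
# checkpoint name is an assumption; its classification head is freshly initialized, so the logits are
# only meaningful after fine-tuning:
# >>> from transformers import AutoTokenizer, DebertaForSequenceClassification
# >>> import torch
# >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
# >>> model = DebertaForSequenceClassification.from_pretrained("microsoft/deberta-base", num_labels=2)
# >>> inputs = tokenizer("This movie was great!", return_tensors="pt")
# >>> outputs = model(**inputs, labels=torch.tensor([1]))
# >>> outputs.loss, outputs.logits.shape  # scalar loss, (batch_size, num_labels)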

@add_start_docstrings('\n    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n    Named-Entity-Recognition (NER) tasks.\n    ', DEBERTA_START_DOCSTRING)
class DebertaForTokenClassification(DebertaPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.deberta = DebertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
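# Editorial usage sketch for DebertaForTokenClassification (not part of the original file); the
# checkpoint name is an assumption and the token-classification head starts untrained:
# >>> from transformers import AutoTokenizer, DebertaForTokenClassification
# >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
# >>> model = DebertaForTokenClassification.from_pretrained("microsoft/deberta-base", num_labels=5)
# >>> inputs = tokenizer("Hugging Face is based in New York City", return_tensors="pt")
# >>> model(**inputs).logits.shape  # (batch_size, sequence_length, num_labels)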

@add_start_docstrings('\n    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', DEBERTA_START_DOCSTRING)
class DebertaForQuestionAnswering(DebertaPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.deberta = DebertaModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_QA, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, expected_output=_QA_EXPECTED_OUTPUT, expected_loss=_QA_EXPECTED_LOSS, qa_target_start_index=_QA_TARGET_START_INDEX, qa_target_end_index=_QA_TARGET_END_INDEX)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        (start_logits, end_logits) = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return (total_loss,) + output if total_loss is not None else output
        return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
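# Editorial usage sketch for DebertaForQuestionAnswering (not part of the original file). The checkpoint
# name is an assumption; without a fine-tuned QA head the decoded span is not meaningful:
# >>> from transformers import AutoTokenizer, DebertaForQuestionAnswering
# >>> import torch
# >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
# >>> model = DebertaForQuestionAnswering.from_pretrained("microsoft/deberta-base")
# >>> inputs = tokenizer("Who proposed DeBERTa?", "DeBERTa was proposed by Pengcheng He et al.", return_tensors="pt")
# >>> with torch.no_grad():
# ...     outputs = model(**inputs)
# >>> start, end = int(outputs.start_logits.argmax()), int(outputs.end_logits.argmax())
# >>> tokenizer.decode(inputs.input_ids[0, start : end + 1])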

# File: transformers-main/src/transformers/models/deberta/modeling_tf_deberta.py
""""""
from __future__ import annotations
import math
from typing import Dict, Optional, Sequence, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFMaskedLMOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput
from ...modeling_tf_utils import TFMaskedLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, unpack_inputs
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_deberta import DebertaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'DebertaConfig'
_CHECKPOINT_FOR_DOC = 'kamalkraj/deberta-base'

class TFDebertaContextPooler(keras.layers.Layer):

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(config.pooler_hidden_size, name='dense')
        self.dropout = TFDebertaStableDropout(config.pooler_dropout, name='dropout')
        self.config = config

    def call(self, hidden_states, training: bool=False):
        context_token = hidden_states[:, 0]
        context_token = self.dropout(context_token, training=training)
        pooled_output = self.dense(context_token)
        pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output)
        return pooled_output

    @property
    def output_dim(self) -> int:
        return self.config.hidden_size

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.pooler_hidden_size])
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)

class TFDebertaXSoftmax(keras.layers.Layer):

    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis

    def call(self, inputs: tf.Tensor, mask: tf.Tensor):
        rmask = tf.logical_not(tf.cast(mask, tf.bool))
        output = tf.where(rmask, tf.cast(float('-inf'), dtype=self.compute_dtype), inputs)
        output = stable_softmax(tf.cast(output, dtype=tf.float32), self.axis)
        output = tf.where(rmask, 0.0, output)
        return output
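# Editorial illustration (not part of the original file): TFDebertaXSoftmax is a masked softmax that
# assigns exactly zero probability to masked positions:
# >>> layer = TFDebertaXSoftmax(axis=-1)
# >>> layer(tf.constant([[1.0, 2.0, 3.0]]), tf.constant([[1, 1, 0]]))
# # ~ [[0.269, 0.731, 0.0]] -- the masked last position gets probability 0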

class TFDebertaStableDropout(keras.layers.Layer):

    def __init__(self, drop_prob, **kwargs):
        super().__init__(**kwargs)
        self.drop_prob = drop_prob

    @tf.custom_gradient
    def xdropout(self, inputs):
        mask = tf.cast(1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool)
        scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=self.compute_dtype)
        if self.drop_prob > 0:
            inputs = tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), inputs) * scale

        def grad(upstream):
            if self.drop_prob > 0:
                return tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), upstream) * scale
            else:
                return upstream
        return (inputs, grad)

    def call(self, inputs: tf.Tensor, training: tf.Tensor=False):
        if training:
            return self.xdropout(inputs)
        return inputs
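# Editorial note: TFDebertaStableDropout mirrors the PyTorch StableDropout: during training it applies
# inverted dropout (zeroing a Bernoulli(drop_prob) mask of positions and rescaling by 1 / (1 - drop_prob))
# through tf.custom_gradient so that the same mask and scale are applied to the upstream gradient; at
# inference time it is the identity.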

class TFDebertaLayerNorm(keras.layers.Layer):

    def __init__(self, size, eps=1e-12, **kwargs):
        super().__init__(**kwargs)
        self.size = size
        self.eps = eps

    def build(self, input_shape):
        self.gamma = self.add_weight(shape=[self.size], initializer=tf.ones_initializer(), name='weight')
        self.beta = self.add_weight(shape=[self.size], initializer=tf.zeros_initializer(), name='bias')
        return super().build(input_shape)

    def call(self, x: tf.Tensor) -> tf.Tensor:
        mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
        variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
        std = tf.math.sqrt(variance + self.eps)
        return self.gamma * (x - mean) / std + self.beta

class TFDebertaSelfOutput(keras.layers.Layer):

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(config.hidden_size, name='dense')
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name='dropout')
        self.config = config

    def call(self, hidden_states, input_tensor, training: bool=False):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)

class TFDebertaAttention(keras.layers.Layer):

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.self = TFDebertaDisentangledSelfAttention(config, name='self')
        self.dense_output = TFDebertaSelfOutput(config, name='output')
        self.config = config

    def call(self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, query_states: tf.Tensor=None, relative_pos: tf.Tensor=None, rel_embeddings: tf.Tensor=None, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:
        self_outputs = self.self(hidden_states=input_tensor, attention_mask=attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions, training=training)
        if query_states is None:
            query_states = input_tensor
        attention_output = self.dense_output(hidden_states=self_outputs[0], input_tensor=query_states, training=training)
        output = (attention_output,) + self_outputs[1:]
        return output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'self', None) is not None:
            with tf.name_scope(self.self.name):
                self.self.build(None)
        if getattr(self, 'dense_output', None) is not None:
            with tf.name_scope(self.dense_output.name):
                self.dense_output.build(None)

class TFDebertaIntermediate(keras.layers.Layer):

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])

class TFDebertaOutput(keras.layers.Layer):

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name='dropout')
        self.config = config

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.intermediate_size])
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)

class TFDebertaLayer(keras.layers.Layer):

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFDebertaAttention(config, name='attention')
        self.intermediate = TFDebertaIntermediate(config, name='intermediate')
        self.bert_output = TFDebertaOutput(config, name='output')

    def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, query_states: tf.Tensor=None, relative_pos: tf.Tensor=None, rel_embeddings: tf.Tensor=None, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:
        attention_outputs = self.attention(input_tensor=hidden_states, attention_mask=attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions, training=training)
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(hidden_states=attention_output)
        layer_output = self.bert_output(hidden_states=intermediate_output, input_tensor=attention_output, training=training)
        outputs = (layer_output,) + attention_outputs[1:]
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'attention', None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, 'intermediate', None) is not None:
            with tf.name_scope(self.intermediate.name):
                self.intermediate.build(None)
        if getattr(self, 'bert_output', None) is not None:
            with tf.name_scope(self.bert_output.name):
                self.bert_output.build(None)

class TFDebertaEncoder(keras.layers.Layer):

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.layer = [TFDebertaLayer(config, name=f'layer_._{i}') for i in range(config.num_hidden_layers)]
        self.relative_attention = getattr(config, 'relative_attention', False)
        self.config = config
        if self.relative_attention:
            self.max_relative_positions = getattr(config, 'max_relative_positions', -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if self.relative_attention:
            self.rel_embeddings = self.add_weight(name='rel_embeddings.weight', shape=[self.max_relative_positions * 2, self.config.hidden_size], initializer=get_initializer(self.config.initializer_range))
        if getattr(self, 'layer', None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)

    def get_rel_embedding(self):
        rel_embeddings = self.rel_embeddings if self.relative_attention else None
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        if len(shape_list(attention_mask)) <= 2:
            extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
            attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
            attention_mask = tf.cast(attention_mask, tf.uint8)
        elif len(shape_list(attention_mask)) == 3:
            attention_mask = tf.expand_dims(attention_mask, 1)
        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        if self.relative_attention and relative_pos is None:
            q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
            relative_pos = build_relative_position(q, shape_list(hidden_states)[-2])
        return relative_pos

    def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, query_states: tf.Tensor=None, relative_pos: tf.Tensor=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
        if isinstance(hidden_states, Sequence):
            next_kv = hidden_states[0]
        else:
            next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        for (i, layer_module) in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(hidden_states=next_kv, attention_mask=attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions, training=training)
            hidden_states = layer_outputs[0]
            if query_states is not None:
                query_states = hidden_states
                if isinstance(hidden_states, Sequence):
                    next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
            else:
                next_kv = hidden_states
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None))
        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions)

def build_relative_position(query_size, key_size):
    q_ids = tf.range(query_size, dtype=tf.int32)
    k_ids = tf.range(key_size, dtype=tf.int32)
    rel_pos_ids = q_ids[:, None] - tf.tile(tf.reshape(k_ids, [1, -1]), [query_size, 1])
    rel_pos_ids = rel_pos_ids[:query_size, :]
    rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
    return tf.cast(rel_pos_ids, tf.int64)
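# Editorial note: this is the TF counterpart of the PyTorch build_relative_position above; it takes no
# device argument, builds the same (1, query_size, key_size) matrix of signed distances i - j, and
# returns it as int64.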

def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    shapes = [shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(query_layer)[2], shape_list(relative_pos)[-1]]
    return tf.broadcast_to(c2p_pos, shapes)

def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    shapes = [shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(key_layer)[-2], shape_list(key_layer)[-2]]
    return tf.broadcast_to(c2p_pos, shapes)

def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
    return tf.broadcast_to(pos_index, shapes)

def torch_gather(x, indices, gather_axis):
    if gather_axis < 0:
        gather_axis = tf.rank(x) + gather_axis
    if gather_axis != tf.rank(x) - 1:
        pre_roll = tf.rank(x) - 1 - gather_axis
        permutation = tf.roll(tf.range(tf.rank(x)), pre_roll, axis=0)
        x = tf.transpose(x, perm=permutation)
        indices = tf.transpose(indices, perm=permutation)
    else:
        pre_roll = 0
    flat_x = tf.reshape(x, (-1, tf.shape(x)[-1]))
    flat_indices = tf.reshape(indices, (-1, tf.shape(indices)[-1]))
    gathered = tf.gather(flat_x, flat_indices, batch_dims=1)
    gathered = tf.reshape(gathered, tf.shape(indices))
    if pre_roll != 0:
        permutation = tf.roll(tf.range(tf.rank(x)), -pre_roll, axis=0)
        gathered = tf.transpose(gathered, perm=permutation)
    return gathered
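# Editorial illustration (not part of the original file): torch_gather emulates PyTorch's
# torch.gather(x, dim, index) by moving the gather axis to the last position if needed, flattening to 2D,
# applying a batched tf.gather, and restoring the original layout. A minimal sketch:
# >>> x = tf.constant([[10., 20., 30.], [40., 50., 60.]])
# >>> idx = tf.constant([[2, 0, 1], [1, 1, 0]])
# >>> torch_gather(x, idx, -1)
# # ~ [[30., 10., 20.], [50., 50., 40.]]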

class TFDebertaDisentangledSelfAttention(keras.layers.Layer):

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.in_proj = keras.layers.Dense(self.all_head_size * 3, kernel_initializer=get_initializer(config.initializer_range), name='in_proj', use_bias=False)
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
        self.relative_attention = getattr(config, 'relative_attention', False)
        self.talking_head = getattr(config, 'talking_head', False)
        if self.talking_head:
            self.head_logits_proj = keras.layers.Dense(self.num_attention_heads, kernel_initializer=get_initializer(config.initializer_range), name='head_logits_proj', use_bias=False)
            self.head_weights_proj = keras.layers.Dense(self.num_attention_heads, kernel_initializer=get_initializer(config.initializer_range), name='head_weights_proj', use_bias=False)
        self.softmax = TFDebertaXSoftmax(axis=-1)
        if self.relative_attention:
            self.max_relative_positions = getattr(config, 'max_relative_positions', -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.pos_dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name='pos_dropout')
            if 'c2p' in self.pos_att_type:
                self.pos_proj = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='pos_proj', use_bias=False)
            if 'p2c' in self.pos_att_type:
                self.pos_q_proj = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='pos_q_proj')
        self.dropout = TFDebertaStableDropout(config.attention_probs_dropout_prob, name='dropout')
        self.config = config

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        self.q_bias = self.add_weight(name='q_bias', shape=self.all_head_size, initializer=keras.initializers.Zeros())
        self.v_bias = self.add_weight(name='v_bias', shape=self.all_head_size, initializer=keras.initializers.Zeros())
        if getattr(self, 'in_proj', None) is not None:
            with tf.name_scope(self.in_proj.name):
                self.in_proj.build([None, None, self.config.hidden_size])
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        if getattr(self, 'head_logits_proj', None) is not None:
            with tf.name_scope(self.head_logits_proj.name):
                self.head_logits_proj.build(None)
        if getattr(self, 'head_weights_proj', None) is not None:
            with tf.name_scope(self.head_weights_proj.name):
                self.head_weights_proj.build(None)
        if getattr(self, 'pos_dropout', None) is not None:
            with tf.name_scope(self.pos_dropout.name):
                self.pos_dropout.build(None)
        if getattr(self, 'pos_proj', None) is not None:
            with tf.name_scope(self.pos_proj.name):
                self.pos_proj.build([self.config.hidden_size])
        if getattr(self, 'pos_q_proj', None) is not None:
            with tf.name_scope(self.pos_q_proj.name):
                self.pos_q_proj.build([self.config.hidden_size])

    def transpose_for_scores(self, tensor: tf.Tensor) -> tf.Tensor:
        shape = shape_list(tensor)[:-1] + [self.num_attention_heads, -1]
        tensor = tf.reshape(tensor=tensor, shape=shape)
        return tf.transpose(tensor, perm=[0, 2, 1, 3])

    def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, query_states: tf.Tensor=None, relative_pos: tf.Tensor=None, rel_embeddings: tf.Tensor=None, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:
        if query_states is None:
            qp = self.in_proj(hidden_states)
            (query_layer, key_layer, value_layer) = tf.split(self.transpose_for_scores(qp), num_or_size_splits=3, axis=-1)
        else:

            def linear(w, b, x):
                out = tf.matmul(x, w, transpose_b=True)
                if b is not None:
                    out += tf.transpose(b)
                return out
            ws = tf.split(tf.transpose(self.in_proj.weight[0]), num_or_size_splits=self.num_attention_heads * 3, axis=0)
            qkvw = tf.TensorArray(dtype=self.dtype, size=3)
            for k in tf.range(3):
                qkvw_inside = tf.TensorArray(dtype=self.dtype, size=self.num_attention_heads)
                for i in tf.range(self.num_attention_heads):
                    qkvw_inside = qkvw_inside.write(i, ws[i * 3 + k])
                qkvw = qkvw.write(k, qkvw_inside.concat())
            qkvb = [None] * 3
            q = linear(qkvw[0], qkvb[0], query_states)
            k = linear(qkvw[1], qkvb[1], hidden_states)
            v = linear(qkvw[2], qkvb[2], hidden_states)
            query_layer = self.transpose_for_scores(q)
            key_layer = self.transpose_for_scores(k)
            value_layer = self.transpose_for_scores(v)
        query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
        value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])
        rel_att = None
        scale_factor = 1 + len(self.pos_att_type)
        scale = math.sqrt(shape_list(query_layer)[-1] * scale_factor)
        query_layer = query_layer / scale
        attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 1, 3, 2]))
        if self.relative_attention:
            rel_embeddings = self.pos_dropout(rel_embeddings, training=training)
            rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
        if rel_att is not None:
            attention_scores = attention_scores + rel_att
        if self.talking_head:
            attention_scores = tf.transpose(self.head_logits_proj(tf.transpose(attention_scores, [0, 2, 3, 1])), [0, 3, 1, 2])
        attention_probs = self.softmax(attention_scores, attention_mask)
        attention_probs = self.dropout(attention_probs, training=training)
        if self.talking_head:
            attention_probs = tf.transpose(self.head_weights_proj(tf.transpose(attention_probs, [0, 2, 3, 1])), [0, 3, 1, 2])
        context_layer = tf.matmul(attention_probs, value_layer)
        context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
        context_layer_shape = shape_list(context_layer)
        new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
        context_layer = tf.reshape(context_layer, new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs

    def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        if relative_pos is None:
            q = shape_list(query_layer)[-2]
            relative_pos = build_relative_position(q, shape_list(key_layer)[-2])
        shape_list_pos = shape_list(relative_pos)
        if len(shape_list_pos) == 2:
            relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
        elif len(shape_list_pos) == 3:
            relative_pos = tf.expand_dims(relative_pos, 1)
        elif len(shape_list_pos) != 4:
            raise ValueError(f'Relative position ids must be of dim 2, 3 or 4, but got dim {len(shape_list_pos)}')
        att_span = tf.cast(tf.minimum(tf.maximum(shape_list(query_layer)[-2], shape_list(key_layer)[-2]), self.max_relative_positions), tf.int64)
        rel_embeddings = tf.expand_dims(rel_embeddings[self.max_relative_positions - att_span:self.max_relative_positions + att_span, :], 0)
        score = 0
        if 'c2p' in self.pos_att_type:
            pos_key_layer = self.pos_proj(rel_embeddings)
            pos_key_layer = self.transpose_for_scores(pos_key_layer)
            c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 1, 3, 2]))
            c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
            c2p_att = torch_gather(c2p_att, c2p_dynamic_expand(c2p_pos, query_layer, relative_pos), -1)
            score += c2p_att
        if 'p2c' in self.pos_att_type:
            pos_query_layer = self.pos_q_proj(rel_embeddings)
            pos_query_layer = self.transpose_for_scores(pos_query_layer)
            pos_query_layer /= tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, dtype=self.compute_dtype))
            if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
                r_pos = build_relative_position(shape_list(key_layer)[-2], shape_list(key_layer)[-2])
            else:
                r_pos = relative_pos
            p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)
            p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 1, 3, 2]))
            p2c_att = tf.transpose(torch_gather(p2c_att, p2c_dynamic_expand(p2c_pos, query_layer, key_layer), -1), [0, 1, 3, 2])
            if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
                pos_index = tf.expand_dims(relative_pos[:, :, :, 0], -1)
                p2c_att = torch_gather(p2c_att, pos_dynamic_expand(pos_index, p2c_att, key_layer), -2)
            score += p2c_att
        return score

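# Embedding layer: looks up word embeddings (created in `build` as `self.weight`), optionally adds
# position embeddings (`position_biased_input`) and token-type embeddings (`type_vocab_size > 0`),
# projects to `hidden_size` when `embedding_size` differs, then applies LayerNorm, the attention
# mask, and stable dropout.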
class TFDebertaEmbeddings(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.hidden_size = config.hidden_size
        self.max_position_embeddings = config.max_position_embeddings
        self.position_biased_input = getattr(config, 'position_biased_input', True)
        self.initializer_range = config.initializer_range
        if self.embedding_size != config.hidden_size:
            self.embed_proj = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='embed_proj', use_bias=False)
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name='dropout')

    def build(self, input_shape=None):
        with tf.name_scope('word_embeddings'):
            self.weight = self.add_weight(name='weight', shape=[self.config.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range))
        with tf.name_scope('token_type_embeddings'):
            if self.config.type_vocab_size > 0:
                self.token_type_embeddings = self.add_weight(name='embeddings', shape=[self.config.type_vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range))
            else:
                self.token_type_embeddings = None
        with tf.name_scope('position_embeddings'):
            if self.position_biased_input:
                self.position_embeddings = self.add_weight(name='embeddings', shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range))
            else:
                self.position_embeddings = None
        if self.built:
            return
        self.built = True
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        if getattr(self, 'embed_proj', None) is not None:
            with tf.name_scope(self.embed_proj.name):
                self.embed_proj.build([None, None, self.embedding_size])

    def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, token_type_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None, mask: tf.Tensor=None, training: bool=False) -> tf.Tensor:
        if input_ids is None and inputs_embeds is None:
            raise ValueError('Need to provide either `input_ids` or `inputs_embeds`.')
        if input_ids is not None:
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
        input_shape = shape_list(inputs_embeds)[:-1]
        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)
        if position_ids is None:
            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
        final_embeddings = inputs_embeds
        if self.position_biased_input:
            position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
            final_embeddings += position_embeds
        if self.config.type_vocab_size > 0:
            token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
            final_embeddings += token_type_embeds
        if self.embedding_size != self.hidden_size:
            final_embeddings = self.embed_proj(final_embeddings)
        final_embeddings = self.LayerNorm(final_embeddings)
        if mask is not None:
            if len(shape_list(mask)) != len(shape_list(final_embeddings)):
                if len(shape_list(mask)) == 4:
                    mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
                mask = tf.cast(tf.expand_dims(mask, axis=2), dtype=self.compute_dtype)
            final_embeddings = final_embeddings * mask
        final_embeddings = self.dropout(final_embeddings, training=training)
        return final_embeddings

class TFDebertaPredictionHeadTransform(keras.layers.Layer):

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.dense = keras.layers.Dense(units=self.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.embedding_size])

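# Masked-LM prediction head: hidden states pass through `TFDebertaPredictionHeadTransform`
# (dense + activation + LayerNorm) and are then decoded against the tied input embedding matrix
# (`input_embeddings.weight`, used with `transpose_b=True`) plus a learned output bias.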
class TFDebertaLMPredictionHead(keras.layers.Layer):

    def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.transform = TFDebertaPredictionHeadTransform(config, name='transform')
        self.input_embeddings = input_embeddings

    def build(self, input_shape=None):
        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer='zeros', trainable=True, name='bias')
        if self.built:
            return
        self.built = True
        if getattr(self, 'transform', None) is not None:
            with tf.name_scope(self.transform.name):
                self.transform.build(None)

    def get_output_embeddings(self) -> keras.layers.Layer:
        return self.input_embeddings

    def set_output_embeddings(self, value: tf.Variable):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self) -> Dict[str, tf.Variable]:
        return {'bias': self.bias}

    def set_bias(self, value: tf.Variable):
        self.bias = value['bias']
        self.config.vocab_size = shape_list(value['bias'])[0]

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.transform(hidden_states=hidden_states)
        seq_length = shape_list(hidden_states)[1]
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
        hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
        return hidden_states

class TFDebertaOnlyMLMHead(keras.layers.Layer):

    def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)
        self.predictions = TFDebertaLMPredictionHead(config, input_embeddings, name='predictions')

    def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
        prediction_scores = self.predictions(hidden_states=sequence_output)
        return prediction_scores

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'predictions', None) is not None:
            with tf.name_scope(self.predictions.name):
                self.predictions.build(None)

class TFDebertaMainLayer(keras.layers.Layer):
    config_class = DebertaConfig

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embeddings = TFDebertaEmbeddings(config, name='embeddings')
        self.encoder = TFDebertaEncoder(config, name='encoder')

    def get_input_embeddings(self) -> keras.layers.Layer:
        return self.embeddings

    def set_input_embeddings(self, value: tf.Variable):
        self.embeddings.weight = value
        self.embeddings.vocab_size = shape_list(value)[0]

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @unpack_inputs
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        if attention_mask is None:
            attention_mask = tf.fill(dims=input_shape, value=1)
        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)
        embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, mask=attention_mask, training=training)
        encoder_outputs = self.encoder(hidden_states=embedding_output, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        return TFBaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'embeddings', None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, 'encoder', None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)

class TFDebertaPreTrainedModel(TFPreTrainedModel):
    config_class = DebertaConfig
    base_model_prefix = 'deberta'
DEBERTA_START_DOCSTRING = '\n    The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled\n    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\'s built\n    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two\n    improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.\n\n    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and\n    behavior.\n\n    \n\n    TensorFlow models and layers in `transformers` accept two formats as input:\n\n    - having all inputs as keyword arguments (like PyTorch models), or\n    - having all inputs as a list, tuple or dict in the first positional argument.\n\n    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n    positional argument:\n\n    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n    - a dictionary with one or several input Tensors associated with the input names given in the docstring:\n    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n    Note that when creating models and layers with\n    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n    about any of this, as you can just pass inputs like you would to any other Python function!\n\n    \n\n    Parameters:\n        config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
DEBERTA_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n            1]`:\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,\n            config.max_position_embeddings - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"

@add_start_docstrings('The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.', DEBERTA_START_DOCSTRING)
class TFDebertaModel(TFDebertaPreTrainedModel):

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.deberta = TFDebertaMainLayer(config, name='deberta')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        outputs = self.deberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deberta', None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)

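# Illustrative usage of `TFDebertaModel` above (a minimal sketch; the checkpoint name is only an
# example of a public DeBERTa checkpoint, not something this file depends on):
#
#     from transformers import AutoTokenizer, TFDebertaModel
#
#     tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
#     model = TFDebertaModel.from_pretrained("microsoft/deberta-base")
#     inputs = tokenizer("Hello, world!", return_tensors="tf")
#     outputs = model(**inputs)
#     last_hidden_state = outputs.last_hidden_state  # shape: (batch_size, seq_len, hidden_size)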
@add_start_docstrings('DeBERTa Model with a `language modeling` head on top.', DEBERTA_START_DOCSTRING)
class TFDebertaForMaskedLM(TFDebertaPreTrainedModel, TFMaskedLanguageModelingLoss):

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        if config.is_decoder:
            logger.warning('If you want to use `TFDebertaForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.')
        self.deberta = TFDebertaMainLayer(config, name='deberta')
        self.mlm = TFDebertaOnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name='cls')

    def get_lm_head(self) -> keras.layers.Layer:
        return self.mlm.predictions

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
        outputs = self.deberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TFMaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deberta', None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
        if getattr(self, 'mlm', None) is not None:
            with tf.name_scope(self.mlm.name):
                self.mlm.build(None)

@add_start_docstrings('\n    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n    pooled output) e.g. for GLUE tasks.\n    ', DEBERTA_START_DOCSTRING)
class TFDebertaForSequenceClassification(TFDebertaPreTrainedModel, TFSequenceClassificationLoss):

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.deberta = TFDebertaMainLayer(config, name='deberta')
        self.pooler = TFDebertaContextPooler(config, name='pooler')
        drop_out = getattr(config, 'cls_dropout', None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = TFDebertaStableDropout(drop_out, name='cls_dropout')
        self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier')
        self.output_dim = self.pooler.output_dim

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        outputs = self.deberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        pooled_output = self.pooler(sequence_output, training=training)
        pooled_output = self.dropout(pooled_output, training=training)
        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deberta', None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
        if getattr(self, 'pooler', None) is not None:
            with tf.name_scope(self.pooler.name):
                self.pooler.build(None)
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.output_dim])

@add_start_docstrings('\n    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n    Named-Entity-Recognition (NER) tasks.\n    ', DEBERTA_START_DOCSTRING)
class TFDebertaForTokenClassification(TFDebertaPreTrainedModel, TFTokenClassificationLoss):

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.deberta = TFDebertaMainLayer(config, name='deberta')
        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
        outputs = self.deberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output, training=training)
        logits = self.classifier(inputs=sequence_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deberta', None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.hidden_size])

@add_start_docstrings('\n    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', DEBERTA_START_DOCSTRING)
class TFDebertaForQuestionAnswering(TFDebertaPreTrainedModel, TFQuestionAnsweringLoss):

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.deberta = TFDebertaMainLayer(config, name='deberta')
        self.qa_outputs = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, start_positions: np.ndarray | tf.Tensor | None=None, end_positions: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
        outputs = self.deberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        logits = self.qa_outputs(inputs=sequence_output)
        (start_logits, end_logits) = tf.split(value=logits, num_or_size_splits=2, axis=-1)
        start_logits = tf.squeeze(input=start_logits, axis=-1)
        end_logits = tf.squeeze(input=end_logits, axis=-1)
        loss = None
        if start_positions is not None and end_positions is not None:
            labels = {'start_position': start_positions}
            labels['end_position'] = end_positions
            loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deberta', None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
        if getattr(self, 'qa_outputs', None) is not None:
            with tf.name_scope(self.qa_outputs.name):
                self.qa_outputs.build([None, None, self.config.hidden_size])

# File: transformers-main/src/transformers/models/deberta/tokenization_deberta.py
""""""
import json
import os
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

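# Byte-level BPE helper (same scheme as GPT-2): maps every byte value 0-255 to a printable unicode
# character so the BPE vocabulary never has to contain raw whitespace or control bytes. Bytes that
# are already printable keep their own character; the rest are shifted into the 256+ range.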
def bytes_to_unicode():
    bs = list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    cs = bs[:]
    n = 0
    for b in range(2 ** 8):
        if b not in bs:
            bs.append(b)
            cs.append(2 ** 8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))

def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

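# GPT-2 style byte-level BPE tokenizer for DeBERTa, but with BERT-like special tokens
# ([CLS], [SEP], [PAD], [UNK], [MASK]). Illustrative usage (a sketch; the checkpoint name is only
# an example):
#
#     from transformers import DebertaTokenizer
#
#     tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
#     tokenizer("Hello world")["input_ids"]  # [CLS] ... [SEP] token ids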
class DebertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ['input_ids', 'attention_mask', 'token_type_ids']

    def __init__(self, vocab_file, merges_file, errors='replace', bos_token='[CLS]', eos_token='[SEP]', sep_token='[SEP]', cls_token='[CLS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]', add_prefix_space=False, add_bos_token=False, **kwargs):
        bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.add_bos_token = add_bos_token
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.errors = errors
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
        super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, add_bos_token=add_bos_token, **kwargs)

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

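    # Special-token formats produced by the three helpers below:
    #   single sequence:    [CLS] X [SEP]
    #   pair of sequences:  [CLS] A [SEP] B [SEP]
    # with token type ids of 0 for the first segment (including its special tokens) and 1 for the second.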
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + [0] * len(token_ids_0) + [1]
        return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return (vocab_file, merge_file)

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())):
            text = ' ' + text
        return (text, kwargs)

# File: transformers-main/src/transformers/models/deberta/tokenization_deberta_fast.py
""""""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_deberta import DebertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

class DebertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
    slow_tokenizer_class = DebertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='[CLS]', eos_token='[SEP]', sep_token='[SEP]', cls_token='[CLS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]', add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
        self.add_bos_token = kwargs.pop('add_bos_token', False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

# File: transformers-main/src/transformers/models/deberta_v2/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_deberta_v2': ['DebertaV2Config', 'DebertaV2OnnxConfig'], 'tokenization_deberta_v2': ['DebertaV2Tokenizer']}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_deberta_v2_fast'] = ['DebertaV2TokenizerFast']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_deberta_v2'] = ['TFDebertaV2ForMaskedLM', 'TFDebertaV2ForQuestionAnswering', 'TFDebertaV2ForMultipleChoice', 'TFDebertaV2ForSequenceClassification', 'TFDebertaV2ForTokenClassification', 'TFDebertaV2Model', 'TFDebertaV2PreTrainedModel']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_deberta_v2'] = ['DebertaV2ForMaskedLM', 'DebertaV2ForMultipleChoice', 'DebertaV2ForQuestionAnswering', 'DebertaV2ForSequenceClassification', 'DebertaV2ForTokenClassification', 'DebertaV2Model', 'DebertaV2PreTrainedModel']
if TYPE_CHECKING:
    from .configuration_deberta_v2 import DebertaV2Config, DebertaV2OnnxConfig
    from .tokenization_deberta_v2 import DebertaV2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_v2_fast import DebertaV2TokenizerFast
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta_v2 import TFDebertaV2ForMaskedLM, TFDebertaV2ForMultipleChoice, TFDebertaV2ForQuestionAnswering, TFDebertaV2ForSequenceClassification, TFDebertaV2ForTokenClassification, TFDebertaV2Model, TFDebertaV2PreTrainedModel
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta_v2 import DebertaV2ForMaskedLM, DebertaV2ForMultipleChoice, DebertaV2ForQuestionAnswering, DebertaV2ForSequenceClassification, DebertaV2ForTokenClassification, DebertaV2Model, DebertaV2PreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/deberta_v2/configuration_deberta_v2.py
""""""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

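# Configuration for DeBERTa-v2 models. The defaults below (vocab_size=128100, hidden_size=1536,
# 24 layers, 24 heads) describe a large DeBERTa-v2 configuration; concrete checkpoints override
# them as needed. Minimal construction sketch:
#
#     from transformers import DebertaV2Config, DebertaV2Model
#
#     config = DebertaV2Config()      # default hyperparameters
#     model = DebertaV2Model(config)  # randomly initialized weights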
class DebertaV2Config(PretrainedConfig):
    model_type = 'deberta-v2'

    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-07, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act='gelu', **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act

class DebertaV2OnnxConfig(OnnxConfig):

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'], batch_size: int=-1, seq_length: int=-1, num_choices: int=-1, is_pair: bool=False, framework: Optional['TensorType']=None, num_channels: int=3, image_width: int=40, image_height: int=40, tokenizer: 'PreTrainedTokenizerBase'=None) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and 'token_type_ids' in dummy_inputs:
            del dummy_inputs['token_type_ids']
        return dummy_inputs

# File: transformers-main/src/transformers/models/deberta_v2/modeling_deberta_v2.py
""""""
from collections.abc import Sequence
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import softmax_backward_data
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_deberta_v2 import DebertaV2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'DebertaV2Config'
_CHECKPOINT_FOR_DOC = 'microsoft/deberta-v2-xlarge'
_QA_TARGET_START_INDEX = 2
_QA_TARGET_END_INDEX = 9

class ContextPooler(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
        self.dropout = StableDropout(config.pooler_dropout)
        self.config = config

    def forward(self, hidden_states):
        context_token = hidden_states[:, 0]
        context_token = self.dropout(context_token)
        pooled_output = self.dense(context_token)
        pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
        return pooled_output

    @property
    def output_dim(self):
        return self.config.hidden_size

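# Masked softmax as a custom autograd Function: masked positions are filled with the dtype's
# minimum before the softmax and zeroed afterwards, the backward pass reuses the saved output via
# `softmax_backward_data`, and `symbolic` provides an ONNX export path.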
class XSoftmax(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input, mask, dim):
        ctx.dim = dim
        rmask = ~mask.to(torch.bool)
        output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
        output = torch.softmax(output, ctx.dim)
        output.masked_fill_(rmask, 0)
        ctx.save_for_backward(output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (output,) = ctx.saved_tensors
        inputGrad = softmax_backward_data(ctx, grad_output, output, ctx.dim, output)
        return (inputGrad, None, None)

    @staticmethod
    def symbolic(g, self, mask, dim):
        import torch.onnx.symbolic_helper as sym_help
        from torch.onnx.symbolic_opset9 import masked_fill, softmax
        mask_cast_value = g.op('Cast', mask, to_i=sym_help.cast_pytorch_to_onnx['Long'])
        r_mask = g.op('Cast', g.op('Sub', g.op('Constant', value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), to_i=sym_help.cast_pytorch_to_onnx['Bool'])
        output = masked_fill(g, self, r_mask, g.op('Constant', value_t=torch.tensor(torch.finfo(self.type().dtype()).min)))
        output = softmax(g, output, dim)
        return masked_fill(g, output, r_mask, g.op('Constant', value_t=torch.tensor(0, dtype=torch.bool)))

class DropoutContext:

    def __init__(self):
        self.dropout = 0
        self.mask = None
        self.scale = 1
        self.reuse_mask = True

def get_mask(input, local_context):
    if not isinstance(local_context, DropoutContext):
        dropout = local_context
        mask = None
    else:
        dropout = local_context.dropout
        dropout *= local_context.scale
        mask = local_context.mask if local_context.reuse_mask else None
    if dropout > 0 and mask is None:
        mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)
    if isinstance(local_context, DropoutContext):
        if local_context.mask is None:
            local_context.mask = mask
    return (mask, dropout)

class XDropout(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input, local_ctx):
        (mask, dropout) = get_mask(input, local_ctx)
        ctx.scale = 1.0 / (1 - dropout)
        if dropout > 0:
            ctx.save_for_backward(mask)
            return input.masked_fill(mask, 0) * ctx.scale
        else:
            return input

    @staticmethod
    def backward(ctx, grad_output):
        if ctx.scale > 1:
            (mask,) = ctx.saved_tensors
            return (grad_output.masked_fill(mask, 0) * ctx.scale, None)
        else:
            return (grad_output, None)

    @staticmethod
    def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
        from torch.onnx import symbolic_opset12
        dropout_p = local_ctx
        if isinstance(local_ctx, DropoutContext):
            dropout_p = local_ctx.dropout
        train = True
        return symbolic_opset12.dropout(g, input, dropout_p, train)

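# "Stable" dropout used throughout DeBERTa: `XDropout` rescales kept activations by 1/(1 - p) in
# the forward pass, and `DropoutContext` lets several calls share the same dropout mask when
# `init_context(reuse_mask=True)` is used, so repeated applications drop the same positions.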
class StableDropout(nn.Module):

    def __init__(self, drop_prob):
        super().__init__()
        self.drop_prob = drop_prob
        self.count = 0
        self.context_stack = None

    def forward(self, x):
        if self.training and self.drop_prob > 0:
            return XDropout.apply(x, self.get_context())
        return x

    def clear_context(self):
        self.count = 0
        self.context_stack = None

    def init_context(self, reuse_mask=True, scale=1):
        if self.context_stack is None:
            self.context_stack = []
        self.count = 0
        for c in self.context_stack:
            c.reuse_mask = reuse_mask
            c.scale = scale

    def get_context(self):
        if self.context_stack is not None:
            if self.count >= len(self.context_stack):
                self.context_stack.append(DropoutContext())
            ctx = self.context_stack[self.count]
            ctx.dropout = self.drop_prob
            self.count += 1
            return ctx
        else:
            return self.drop_prob

class DebertaV2SelfOutput(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

class DebertaV2Attention(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.self = DisentangledSelfAttention(config)
        self.output = DebertaV2SelfOutput(config)
        self.config = config

    def forward(self, hidden_states, attention_mask, output_attentions=False, query_states=None, relative_pos=None, rel_embeddings=None):
        self_output = self.self(hidden_states, attention_mask, output_attentions, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings)
        if output_attentions:
            (self_output, att_matrix) = self_output
        if query_states is None:
            query_states = hidden_states
        attention_output = self.output(self_output, query_states)
        if output_attentions:
            return (attention_output, att_matrix)
        else:
            return attention_output

class DebertaV2Intermediate(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

class DebertaV2Output(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

class DebertaV2Layer(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.attention = DebertaV2Attention(config)
        self.intermediate = DebertaV2Intermediate(config)
        self.output = DebertaV2Output(config)

    def forward(self, hidden_states, attention_mask, query_states=None, relative_pos=None, rel_embeddings=None, output_attentions=False):
        attention_output = self.attention(hidden_states, attention_mask, output_attentions=output_attentions, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings)
        if output_attentions:
            (attention_output, att_matrix) = attention_output
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        if output_attentions:
            return (layer_output, att_matrix)
        else:
            return layer_output

class ConvLayer(nn.Module):

    def __init__(self, config):
        super().__init__()
        kernel_size = getattr(config, 'conv_kernel_size', 3)
        groups = getattr(config, 'conv_groups', 1)
        self.conv_act = getattr(config, 'conv_act', 'tanh')
        self.conv = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, residual_states, input_mask):
        out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
        rmask = (1 - input_mask).bool()
        out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)
        out = ACT2FN[self.conv_act](self.dropout(out))
        layer_norm_input = residual_states + out
        output = self.LayerNorm(layer_norm_input).to(layer_norm_input)
        if input_mask is None:
            output_states = output
        else:
            if input_mask.dim() != layer_norm_input.dim():
                if input_mask.dim() == 4:
                    input_mask = input_mask.squeeze(1).squeeze(1)
                input_mask = input_mask.unsqueeze(2)
            input_mask = input_mask.to(output.dtype)
            output_states = output * input_mask
        return output_states

class DebertaV2Encoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)])
        self.relative_attention = getattr(config, 'relative_attention', False)
        if self.relative_attention:
            self.max_relative_positions = getattr(config, 'max_relative_positions', -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.position_buckets = getattr(config, 'position_buckets', -1)
            pos_ebd_size = self.max_relative_positions * 2
            if self.position_buckets > 0:
                pos_ebd_size = self.position_buckets * 2
            self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)
        self.norm_rel_ebd = [x.strip() for x in getattr(config, 'norm_rel_ebd', 'none').lower().split('|')]
        if 'layer_norm' in self.norm_rel_ebd:
            self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)
        self.conv = ConvLayer(config) if getattr(config, 'conv_kernel_size', 0) > 0 else None
        self.gradient_checkpointing = False

    def get_rel_embedding(self):
        rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
        if rel_embeddings is not None and 'layer_norm' in self.norm_rel_ebd:
            rel_embeddings = self.LayerNorm(rel_embeddings)
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        if attention_mask.dim() <= 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
        elif attention_mask.dim() == 3:
            attention_mask = attention_mask.unsqueeze(1)
        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        if self.relative_attention and relative_pos is None:
            q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
            relative_pos = build_relative_position(q, hidden_states.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions, device=hidden_states.device)
        return relative_pos

    def forward(self, hidden_states, attention_mask, output_hidden_states=True, output_attentions=False, query_states=None, relative_pos=None, return_dict=True):
        if attention_mask.dim() <= 2:
            input_mask = attention_mask
        else:
            input_mask = attention_mask.sum(-2) > 0
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        if isinstance(hidden_states, Sequence):
            next_kv = hidden_states[0]
        else:
            next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        output_states = next_kv
        for (i, layer_module) in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (output_states,)
            if self.gradient_checkpointing and self.training:
                output_states = self._gradient_checkpointing_func(layer_module.__call__, next_kv, attention_mask, query_states, relative_pos, rel_embeddings, output_attentions)
            else:
                output_states = layer_module(next_kv, attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions)
            if output_attentions:
                (output_states, att_m) = output_states
            if i == 0 and self.conv is not None:
                output_states = self.conv(hidden_states, output_states, input_mask)
            if query_states is not None:
                query_states = output_states
                if isinstance(hidden_states, Sequence):
                    next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
            else:
                next_kv = output_states
            if output_attentions:
                all_attentions = all_attentions + (att_m,)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (output_states,)
        if not return_dict:
            return tuple((v for v in [output_states, all_hidden_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions)
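
# Editor's note: a standalone re-derivation (illustrative shapes, not part of the library)
# of what DebertaV2Encoder.get_attention_mask does with a 2D padding mask: a
# (batch, seq_len) mask is expanded into a (batch, 1, seq_len, seq_len) mask that marks
# which query/key pairs may attend to each other.
def _extended_attention_mask_sketch():
    import torch

    attention_mask = torch.tensor([[1, 1, 1, 0]])            # (batch=1, seq_len=4)
    extended = attention_mask.unsqueeze(1).unsqueeze(2)       # (1, 1, 1, 4)
    extended = extended * extended.squeeze(-2).unsqueeze(-1)  # (1, 1, 4, 4)
    return extended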

def make_log_bucket_position(relative_pos, bucket_size, max_position):
    sign = torch.sign(relative_pos)
    mid = bucket_size // 2
    abs_pos = torch.where((relative_pos < mid) & (relative_pos > -mid), torch.tensor(mid - 1).type_as(relative_pos), torch.abs(relative_pos))
    log_pos = torch.ceil(torch.log(abs_pos / mid) / torch.log(torch.tensor((max_position - 1) / mid)) * (mid - 1)) + mid
    bucket_pos = torch.where(abs_pos <= mid, relative_pos.type_as(log_pos), log_pos * sign)
    return bucket_pos

def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1, device=None):
    q_ids = torch.arange(0, query_size, device=device)
    k_ids = torch.arange(0, key_size, device=device)
    rel_pos_ids = q_ids[:, None] - k_ids[None, :]
    if bucket_size > 0 and max_position > 0:
        rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
    rel_pos_ids = rel_pos_ids.to(torch.long)
    rel_pos_ids = rel_pos_ids[:query_size, :]
    rel_pos_ids = rel_pos_ids.unsqueeze(0)
    return rel_pos_ids
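
# Editor's note: illustrative sketch (not part of the library) of the relative position
# matrix built above. Entry [0, i, j] equals i - j, optionally log-bucketed, with shape
# (1, query_size, key_size).
def _relative_position_sketch():
    rel = build_relative_position(3, 3)
    # tensor([[[ 0, -1, -2],
    #          [ 1,  0, -1],
    #          [ 2,  1,  0]]])
    bucketed = build_relative_position(8, 8, bucket_size=4, max_position=16)
    return rel, bucketed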

@torch.jit.script
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])

@torch.jit.script
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])

@torch.jit.script
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))

class DisentangledSelfAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.num_attention_heads = config.num_attention_heads
        _attention_head_size = config.hidden_size // config.num_attention_heads
        self.attention_head_size = getattr(config, 'attention_head_size', _attention_head_size)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
        self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
        self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
        self.share_att_key = getattr(config, 'share_att_key', False)
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
        self.relative_attention = getattr(config, 'relative_attention', False)
        if self.relative_attention:
            self.position_buckets = getattr(config, 'position_buckets', -1)
            self.max_relative_positions = getattr(config, 'max_relative_positions', -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.pos_ebd_size = self.max_relative_positions
            if self.position_buckets > 0:
                self.pos_ebd_size = self.position_buckets
            self.pos_dropout = StableDropout(config.hidden_dropout_prob)
            if not self.share_att_key:
                if 'c2p' in self.pos_att_type:
                    self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
                if 'p2c' in self.pos_att_type:
                    self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = StableDropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x, attention_heads):
        new_x_shape = x.size()[:-1] + (attention_heads, -1)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))

    def forward(self, hidden_states, attention_mask, output_attentions=False, query_states=None, relative_pos=None, rel_embeddings=None):
        if query_states is None:
            query_states = hidden_states
        query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
        key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
        value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
        rel_att = None
        scale_factor = 1
        if 'c2p' in self.pos_att_type:
            scale_factor += 1
        if 'p2c' in self.pos_att_type:
            scale_factor += 1
        scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
        attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2) / scale.to(dtype=query_layer.dtype))
        if self.relative_attention:
            rel_embeddings = self.pos_dropout(rel_embeddings)
            rel_att = self.disentangled_attention_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
        if rel_att is not None:
            attention_scores = attention_scores + rel_att
        attention_scores = attention_scores.view(-1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1))
        attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.bmm(attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer)
        context_layer = context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1)).permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (-1,)
        context_layer = context_layer.view(new_context_layer_shape)
        if output_attentions:
            return (context_layer, attention_probs)
        else:
            return context_layer

    def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        if relative_pos is None:
            q = query_layer.size(-2)
            relative_pos = build_relative_position(q, key_layer.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions, device=query_layer.device)
        if relative_pos.dim() == 2:
            relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
        elif relative_pos.dim() == 3:
            relative_pos = relative_pos.unsqueeze(1)
        elif relative_pos.dim() != 4:
            raise ValueError(f'Relative position ids must be of dim 2, 3 or 4, but got dim {relative_pos.dim()}')
        att_span = self.pos_ebd_size
        relative_pos = relative_pos.long().to(query_layer.device)
        rel_embeddings = rel_embeddings[0:att_span * 2, :].unsqueeze(0)
        if self.share_att_key:
            pos_query_layer = self.transpose_for_scores(self.query_proj(rel_embeddings), self.num_attention_heads).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
            pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
        else:
            if 'c2p' in self.pos_att_type:
                pos_key_layer = self.transpose_for_scores(self.pos_key_proj(rel_embeddings), self.num_attention_heads).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
            if 'p2c' in self.pos_att_type:
                pos_query_layer = self.transpose_for_scores(self.pos_query_proj(rel_embeddings), self.num_attention_heads).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
        score = 0
        if 'c2p' in self.pos_att_type:
            scale = torch.sqrt(torch.tensor(pos_key_layer.size(-1), dtype=torch.float) * scale_factor)
            c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2))
            c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
            c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]))
            score += c2p_att / scale.to(dtype=c2p_att.dtype)
        if 'p2c' in self.pos_att_type:
            scale = torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor)
            if key_layer.size(-2) != query_layer.size(-2):
                r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), bucket_size=self.position_buckets, max_position=self.max_relative_positions, device=query_layer.device)
                r_pos = r_pos.unsqueeze(0)
            else:
                r_pos = relative_pos
            p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
            p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2))
            p2c_att = torch.gather(p2c_att, dim=-1, index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)])).transpose(-1, -2)
            score += p2c_att / scale.to(dtype=p2c_att.dtype)
        return score
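
# Editor's note: sketch (illustrative values) of the score scaling used by
# DisentangledSelfAttention above. With both 'c2p' and 'p2c' terms enabled,
# scale_factor == 3 and scores are divided by sqrt(head_dim * 3) rather than the usual
# sqrt(head_dim), because three score components are summed.
def _attention_scale_sketch(head_dim=64, pos_att_type=('c2p', 'p2c')):
    import math

    scale_factor = 1 + ('c2p' in pos_att_type) + ('p2c' in pos_att_type)
    return math.sqrt(head_dim * scale_factor)  # e.g. sqrt(64 * 3) ~= 13.86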

class DebertaV2Embeddings(nn.Module):

    def __init__(self, config):
        super().__init__()
        pad_token_id = getattr(config, 'pad_token_id', 0)
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)
        self.position_biased_input = getattr(config, 'position_biased_input', True)
        if not self.position_biased_input:
            self.position_embeddings = None
        else:
            self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)
        if config.type_vocab_size > 0:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
        if self.embedding_size != config.hidden_size:
            self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = StableDropout(config.hidden_dropout_prob)
        self.config = config
        self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if self.position_embeddings is not None:
            position_embeddings = self.position_embeddings(position_ids.long())
        else:
            position_embeddings = torch.zeros_like(inputs_embeds)
        embeddings = inputs_embeds
        if self.position_biased_input:
            embeddings += position_embeddings
        if self.config.type_vocab_size > 0:
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings += token_type_embeddings
        if self.embedding_size != self.config.hidden_size:
            embeddings = self.embed_proj(embeddings)
        embeddings = self.LayerNorm(embeddings)
        if mask is not None:
            if mask.dim() != embeddings.dim():
                if mask.dim() == 4:
                    mask = mask.squeeze(1).squeeze(1)
                mask = mask.unsqueeze(2)
            mask = mask.to(embeddings.dtype)
            embeddings = embeddings * mask
        embeddings = self.dropout(embeddings)
        return embeddings

class DebertaV2PreTrainedModel(PreTrainedModel):
    config_class = DebertaV2Config
    base_model_prefix = 'deberta'
    _keys_to_ignore_on_load_unexpected = ['position_embeddings']
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
DEBERTA_START_DOCSTRING = "\n    The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled\n    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built\n    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two\n    improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage\n    and behavior.\n\n\n    Parameters:\n        config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
DEBERTA_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`torch.LongTensor` of shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n            1]`:\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,\n            config.max_position_embeddings - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"

@add_start_docstrings('The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.', DEBERTA_START_DOCSTRING)
class DebertaV2Model(DebertaV2PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = DebertaV2Embeddings(config)
        self.encoder = DebertaV2Encoder(config)
        self.z_steps = 0
        self.config = config
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError('The prune function is not implemented in the DeBERTa model.')

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        embedding_output = self.embeddings(input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids, mask=attention_mask, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(embedding_output, attention_mask, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict)
        encoded_layers = encoder_outputs[1]
        if self.z_steps > 1:
            hidden_states = encoded_layers[-2]
            layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
            query_states = encoded_layers[-1]
            rel_embeddings = self.encoder.get_rel_embedding()
            attention_mask = self.encoder.get_attention_mask(attention_mask)
            rel_pos = self.encoder.get_rel_pos(embedding_output)
            for layer in layers[1:]:
                query_states = layer(hidden_states, attention_mask, output_attentions=False, query_states=query_states, relative_pos=rel_pos, rel_embeddings=rel_embeddings)
                encoded_layers.append(query_states)
        sequence_output = encoded_layers[-1]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1 if output_hidden_states else 2:]
        return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states if output_hidden_states else None, attentions=encoder_outputs.attentions)
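
# Editor's note: minimal usage sketch for DebertaV2Model. The checkpoint name is an
# assumption (any DeBERTa-v2 checkpoint on the Hub works the same way); this helper is
# not part of the library itself.
def _deberta_v2_model_usage_sketch():
    from transformers import AutoTokenizer, DebertaV2Model

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
    inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
    outputs = model(**inputs)
    return outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)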

@add_start_docstrings('DeBERTa Model with a `language modeling` head on top.', DEBERTA_START_DOCSTRING)
class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel):
    _tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias']

    def __init__(self, config):
        super().__init__(config)
        self.deberta = DebertaV2Model(config)
        self.cls = DebertaV2OnlyMLMHead(config)
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='[MASK]')
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MaskedLMOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[1:]
            return (masked_lm_loss,) + output if masked_lm_loss is not None else output
        return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
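
# Editor's note: masked-LM usage sketch (checkpoint name is an assumption, not part of the
# library). The [MASK] position is scored against the full vocabulary by the
# DebertaV2OnlyMLMHead defined below.
def _masked_lm_usage_sketch():
    from transformers import AutoTokenizer, DebertaV2ForMaskedLM

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    model = DebertaV2ForMaskedLM.from_pretrained("microsoft/deberta-v2-xlarge")
    inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
    logits = model(**inputs).logits
    mask_index = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero()[0, 1]
    token_id = logits[0, mask_index].argmax().item()
    return tokenizer.decode([token_id])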

class DebertaV2PredictionHeadTransform(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.dense = nn.Linear(config.hidden_size, self.embedding_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states

class DebertaV2LMPredictionHead(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.transform = DebertaV2PredictionHeadTransform(config)
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        self.decoder.bias = self.bias

    def _tie_weights(self):
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states

class DebertaV2OnlyMLMHead(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.predictions = DebertaV2LMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores

@add_start_docstrings('\n    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n    pooled output) e.g. for GLUE tasks.\n    ', DEBERTA_START_DOCSTRING)
class DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        num_labels = getattr(config, 'num_labels', 2)
        self.num_labels = num_labels
        self.deberta = DebertaV2Model(config)
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim
        self.classifier = nn.Linear(output_dim, num_labels)
        drop_out = getattr(config, 'cls_dropout', None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = StableDropout(drop_out)
        self.post_init()

    def get_input_embeddings(self):
        return self.deberta.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    loss_fn = nn.MSELoss()
                    logits = logits.view(-1).to(labels.dtype)
                    loss = loss_fn(logits, labels.view(-1))
                elif labels.dim() == 1 or labels.size(-1) == 1:
                    label_index = (labels >= 0).nonzero()
                    labels = labels.long()
                    if label_index.size(0) > 0:
                        labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1)))
                        labels = torch.gather(labels, 0, label_index.view(-1))
                        loss_fct = CrossEntropyLoss()
                        loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
                    else:
                        loss = torch.tensor(0).to(logits)
                else:
                    log_softmax = nn.LogSoftmax(-1)
                    loss = -(log_softmax(logits) * labels).sum(-1).mean()
            elif self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
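
# Editor's note: usage sketch for the sequence-classification head (checkpoint name and
# label value are assumptions). When `labels` is passed, the branch above picks MSE,
# cross-entropy, or BCE-with-logits depending on config.problem_type and num_labels.
def _sequence_classification_usage_sketch():
    import torch
    from transformers import AutoTokenizer, DebertaV2ForSequenceClassification

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    model = DebertaV2ForSequenceClassification.from_pretrained("microsoft/deberta-v2-xlarge", num_labels=2)
    inputs = tokenizer("A great movie.", return_tensors="pt")
    outputs = model(**inputs, labels=torch.tensor([1]))
    return outputs.loss, outputs.logits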

@add_start_docstrings('\n    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n    Named-Entity-Recognition (NER) tasks.\n    ', DEBERTA_START_DOCSTRING)
class DebertaV2ForTokenClassification(DebertaV2PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.deberta = DebertaV2Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

@add_start_docstrings('\n    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', DEBERTA_START_DOCSTRING)
class DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.deberta = DebertaV2Model(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=_QA_TARGET_START_INDEX, qa_target_end_index=_QA_TARGET_END_INDEX)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        (start_logits, end_logits) = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return (total_loss,) + output if total_loss is not None else output
        return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
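
# Editor's note: extractive-QA usage sketch. The checkpoint name is an assumption; a
# SQuAD-finetuned DeBERTa-v2 checkpoint would give meaningful spans, a raw one will not.
def _question_answering_usage_sketch():
    from transformers import AutoTokenizer, DebertaV2ForQuestionAnswering

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    model = DebertaV2ForQuestionAnswering.from_pretrained("microsoft/deberta-v2-xlarge")
    inputs = tokenizer("Who proposed DeBERTa?", "DeBERTa was proposed by Pengcheng He et al.", return_tensors="pt")
    outputs = model(**inputs)
    start = outputs.start_logits.argmax(dim=-1).item()
    end = outputs.end_logits.argmax(dim=-1).item()
    return tokenizer.decode(inputs["input_ids"][0, start : end + 1])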

@add_start_docstrings('\n    DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n    softmax) e.g. for RocStories/SWAG tasks.\n    ', DEBERTA_START_DOCSTRING)
class DebertaV2ForMultipleChoice(DebertaV2PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        num_labels = getattr(config, 'num_labels', 2)
        self.num_labels = num_labels
        self.deberta = DebertaV2Model(config)
        self.pooler = ContextPooler(config)
        output_dim = self.pooler.output_dim
        self.classifier = nn.Linear(output_dim, 1)
        drop_out = getattr(config, 'cls_dropout', None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = StableDropout(drop_out)
        self.init_weights()

    def get_input_embeddings(self):
        return self.deberta.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.deberta.set_input_embeddings(new_embeddings)

    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MultipleChoiceModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
        outputs = self.deberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        encoder_layer = outputs[0]
        pooled_output = self.pooler(encoder_layer)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

# File: transformers-main/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py
""""""
from __future__ import annotations
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput
from ...modeling_tf_utils import TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, unpack_inputs
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_deberta_v2 import DebertaV2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'DebertaV2Config'
_CHECKPOINT_FOR_DOC = 'kamalkraj/deberta-v2-xlarge'

class TFDebertaV2ContextPooler(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(config.pooler_hidden_size, name='dense')
        self.dropout = TFDebertaV2StableDropout(config.pooler_dropout, name='dropout')
        self.config = config

    def call(self, hidden_states, training: bool=False):
        context_token = hidden_states[:, 0]
        context_token = self.dropout(context_token, training=training)
        pooled_output = self.dense(context_token)
        pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output)
        return pooled_output

    @property
    def output_dim(self) -> int:
        return self.config.hidden_size

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.pooler_hidden_size])
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)

class TFDebertaV2XSoftmax(keras.layers.Layer):

    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis

    def call(self, inputs: tf.Tensor, mask: tf.Tensor):
        rmask = tf.logical_not(tf.cast(mask, tf.bool))
        output = tf.where(rmask, tf.cast(float('-inf'), dtype=self.compute_dtype), inputs)
        output = stable_softmax(tf.cast(output, dtype=tf.float32), self.axis)
        output = tf.where(rmask, 0.0, output)
        return output
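
# Editor's note: illustrative sketch (assumed values, not part of the library) of the
# masked softmax above: masked positions are set to -inf before the softmax and zeroed
# afterwards, so they receive no probability mass.
def _tf_xsoftmax_sketch():
    scores = tf.constant([[2.0, 1.0, 0.5]])
    mask = tf.constant([[1, 1, 0]])
    return TFDebertaV2XSoftmax(axis=-1)(scores, mask)  # last position gets probability 0.0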

class TFDebertaV2StableDropout(keras.layers.Layer):

    def __init__(self, drop_prob, **kwargs):
        super().__init__(**kwargs)
        self.drop_prob = drop_prob

    @tf.custom_gradient
    def xdropout(self, inputs):
        mask = tf.cast(1 - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), tf.bool)
        scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=self.compute_dtype)
        if self.drop_prob > 0:
            inputs = tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), inputs) * scale

        def grad(upstream):
            if self.drop_prob > 0:
                return tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), upstream) * scale
            else:
                return upstream
        return (inputs, grad)

    def call(self, inputs: tf.Tensor, training: tf.Tensor=False):
        if training:
            return self.xdropout(inputs)
        return inputs

class TFDebertaV2SelfOutput(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(config.hidden_size, name='dense')
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name='dropout')
        self.config = config

    def call(self, hidden_states, input_tensor, training: bool=False):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)

class TFDebertaV2Attention(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.self = TFDebertaV2DisentangledSelfAttention(config, name='self')
        self.dense_output = TFDebertaV2SelfOutput(config, name='output')
        self.config = config

    def call(self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, query_states: tf.Tensor=None, relative_pos: tf.Tensor=None, rel_embeddings: tf.Tensor=None, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:
        self_outputs = self.self(hidden_states=input_tensor, attention_mask=attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions, training=training)
        if query_states is None:
            query_states = input_tensor
        attention_output = self.dense_output(hidden_states=self_outputs[0], input_tensor=query_states, training=training)
        output = (attention_output,) + self_outputs[1:]
        return output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'self', None) is not None:
            with tf.name_scope(self.self.name):
                self.self.build(None)
        if getattr(self, 'dense_output', None) is not None:
            with tf.name_scope(self.dense_output.name):
                self.dense_output.build(None)

class TFDebertaV2Intermediate(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])

class TFDebertaV2Output(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name='dropout')
        self.config = config

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.intermediate_size])
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)

class TFDebertaV2Layer(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFDebertaV2Attention(config, name='attention')
        self.intermediate = TFDebertaV2Intermediate(config, name='intermediate')
        self.bert_output = TFDebertaV2Output(config, name='output')

    def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, query_states: tf.Tensor=None, relative_pos: tf.Tensor=None, rel_embeddings: tf.Tensor=None, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:
        attention_outputs = self.attention(input_tensor=hidden_states, attention_mask=attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions, training=training)
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(hidden_states=attention_output)
        layer_output = self.bert_output(hidden_states=intermediate_output, input_tensor=attention_output, training=training)
        outputs = (layer_output,) + attention_outputs[1:]
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'attention', None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, 'intermediate', None) is not None:
            with tf.name_scope(self.intermediate.name):
                self.intermediate.build(None)
        if getattr(self, 'bert_output', None) is not None:
            with tf.name_scope(self.bert_output.name):
                self.bert_output.build(None)

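# Optional convolution branch (enabled when `config.conv_kernel_size` > 0): a 1D
# convolution over the token dimension, implemented with `tf.nn.conv2d` on an
# expanded axis. It reads the embedding-layer output (`hidden_states`); its masked,
# dropped-out and activated result is added to the first encoder layer's output
# (`residual_states`) before LayerNorm and re-masking.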
class TFDebertaV2ConvLayer(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.kernel_size = getattr(config, 'conv_kernel_size', 3)
        self.conv_act = get_tf_activation(getattr(config, 'conv_act', 'tanh'))
        self.padding = (self.kernel_size - 1) // 2
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name='dropout')
        self.config = config

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        with tf.name_scope('conv'):
            self.conv_kernel = self.add_weight(name='kernel', shape=[self.kernel_size, self.config.hidden_size, self.config.hidden_size], initializer=get_initializer(self.config.initializer_range))
            self.conv_bias = self.add_weight(name='bias', shape=[self.config.hidden_size], initializer=tf.zeros_initializer())
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)

    def call(self, hidden_states: tf.Tensor, residual_states: tf.Tensor, input_mask: tf.Tensor, training: bool=False) -> tf.Tensor:
        out = tf.nn.conv2d(tf.expand_dims(hidden_states, 1), tf.expand_dims(self.conv_kernel, 0), strides=1, padding=[[0, 0], [0, 0], [self.padding, self.padding], [0, 0]])
        out = tf.squeeze(tf.nn.bias_add(out, self.conv_bias), 1)
        rmask = tf.cast(1 - input_mask, tf.bool)
        out = tf.where(tf.broadcast_to(tf.expand_dims(rmask, -1), shape_list(out)), 0.0, out)
        out = self.dropout(out, training=training)
        out = self.conv_act(out)
        layer_norm_input = residual_states + out
        output = self.LayerNorm(layer_norm_input)
        if input_mask is None:
            output_states = output
        else:
            if len(shape_list(input_mask)) != len(shape_list(layer_norm_input)):
                if len(shape_list(input_mask)) == 4:
                    input_mask = tf.squeeze(tf.squeeze(input_mask, axis=1), axis=1)
                input_mask = tf.cast(tf.expand_dims(input_mask, axis=2), dtype=self.compute_dtype)
            output_states = output * input_mask
        return output_states

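# Encoder stack: `num_hidden_layers` TFDebertaV2Layer blocks sharing a single
# relative-position embedding table (`rel_embeddings`, optionally LayerNorm'd when
# 'layer_norm' is listed in `norm_rel_ebd`). When a conv layer is configured, it is
# applied once, right after the first transformer layer (see `call`).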
class TFDebertaV2Encoder(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.layer = [TFDebertaV2Layer(config, name=f'layer_._{i}') for i in range(config.num_hidden_layers)]
        self.relative_attention = getattr(config, 'relative_attention', False)
        self.config = config
        if self.relative_attention:
            self.max_relative_positions = getattr(config, 'max_relative_positions', -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.position_buckets = getattr(config, 'position_buckets', -1)
            self.pos_ebd_size = self.max_relative_positions * 2
            if self.position_buckets > 0:
                self.pos_ebd_size = self.position_buckets * 2
        self.norm_rel_ebd = [x.strip() for x in getattr(config, 'norm_rel_ebd', 'none').lower().split('|')]
        if 'layer_norm' in self.norm_rel_ebd:
            self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.conv = TFDebertaV2ConvLayer(config, name='conv') if getattr(config, 'conv_kernel_size', 0) > 0 else None

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if self.relative_attention:
            self.rel_embeddings = self.add_weight(name='rel_embeddings.weight', shape=[self.pos_ebd_size, self.config.hidden_size], initializer=get_initializer(self.config.initializer_range))
        if getattr(self, 'conv', None) is not None:
            with tf.name_scope(self.conv.name):
                self.conv.build(None)
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, self.config.hidden_size])
        if getattr(self, 'layer', None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)

    def get_rel_embedding(self):
        rel_embeddings = self.rel_embeddings if self.relative_attention else None
        if rel_embeddings is not None and 'layer_norm' in self.norm_rel_ebd:
            rel_embeddings = self.LayerNorm(rel_embeddings)
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        if len(shape_list(attention_mask)) <= 2:
            extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
            attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
            attention_mask = tf.cast(attention_mask, tf.uint8)
        elif len(shape_list(attention_mask)) == 3:
            attention_mask = tf.expand_dims(attention_mask, 1)
        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        if self.relative_attention and relative_pos is None:
            q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
            relative_pos = build_relative_position(q, shape_list(hidden_states)[-2], bucket_size=self.position_buckets, max_position=self.max_relative_positions)
        return relative_pos

    def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, query_states: tf.Tensor=None, relative_pos: tf.Tensor=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        if len(shape_list(attention_mask)) <= 2:
            input_mask = attention_mask
        else:
            input_mask = tf.cast(tf.math.reduce_sum(attention_mask, axis=-2) > 0, dtype=tf.uint8)
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
        next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        output_states = next_kv
        for (i, layer_module) in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (output_states,)
            layer_outputs = layer_module(hidden_states=next_kv, attention_mask=attention_mask, query_states=query_states, relative_pos=relative_pos, rel_embeddings=rel_embeddings, output_attentions=output_attentions, training=training)
            output_states = layer_outputs[0]
            if i == 0 and self.conv is not None:
                output_states = self.conv(hidden_states, output_states, input_mask)
            next_kv = output_states
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (output_states,)
        if not return_dict:
            return tuple((v for v in [output_states, all_hidden_states, all_attentions] if v is not None))
        return TFBaseModelOutput(last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions)

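# Log-bucketed relative positions: offsets with |offset| <= bucket_size // 2 keep
# their exact value, while larger offsets are compressed logarithmically so that
# distances up to `max_position` fit into the remaining buckets. As an illustrative
# (non-source) example, with bucket_size=4 and max_position=8 the offsets 0, ±1, ±2
# map to themselves while ±3 ... ±7 all collapse into the ±3 bucket.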
def make_log_bucket_position(relative_pos, bucket_size, max_position):
    sign = tf.math.sign(relative_pos)
    mid = bucket_size // 2
    abs_pos = tf.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, tf.math.abs(relative_pos))
    log_pos = tf.math.ceil(tf.cast(tf.math.log(abs_pos / mid), tf.float32) / tf.cast(tf.math.log((max_position - 1) / mid), tf.float32) * tf.cast(mid - 1, tf.float32)) + tf.cast(mid, tf.float32)
    bucket_pos = tf.cast(tf.where(abs_pos <= mid, tf.cast(relative_pos, tf.float32), log_pos * tf.cast(sign, tf.float32)), tf.int32)
    return bucket_pos

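# Relative position matrix: entry [q, k] = q - k (query index minus key index),
# optionally log-bucketed, returned with a leading batch axis of 1. For example,
# query_size=key_size=3 yields [[0, -1, -2], [1, 0, -1], [2, 1, 0]].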
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):
    q_ids = tf.range(query_size, dtype=tf.int32)
    k_ids = tf.range(key_size, dtype=tf.int32)
    rel_pos_ids = q_ids[:, None] - tf.tile(tf.expand_dims(k_ids, axis=0), [shape_list(q_ids)[0], 1])
    if bucket_size > 0 and max_position > 0:
        rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
    rel_pos_ids = rel_pos_ids[:query_size, :]
    rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
    return tf.cast(rel_pos_ids, tf.int64)

def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
    shapes = [shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(query_layer)[2], shape_list(relative_pos)[-1]]
    return tf.broadcast_to(c2p_pos, shapes)

def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
    shapes = [shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(key_layer)[-2], shape_list(key_layer)[-2]]
    return tf.broadcast_to(c2p_pos, shapes)

def pos_dynamic_expand(pos_index, p2c_att, key_layer):
    shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
    return tf.broadcast_to(pos_index, shapes)

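# Batched gather along the last axis. Under a TPU strategy the indexed gather is
# replaced by a one-hot einsum, which tends to lower more predictably to XLA; on
# other devices `tf.gather(..., batch_dims=2)` is used directly.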
def take_along_axis(x, indices):
    if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
        one_hot_indices = tf.one_hot(indices, depth=x.shape[-1], dtype=x.dtype)
        gathered = tf.einsum('ijkl,ijl->ijk', one_hot_indices, x)
    else:
        gathered = tf.gather(x, indices, batch_dims=2)
    return gathered

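# Disentangled self-attention: the usual content-to-content scores are augmented
# with content-to-position ('c2p') and/or position-to-content ('p2c') bias terms
# computed from the relative-position embeddings (see `disentangled_att_bias`).
# Each enabled bias term increments `scale_factor`, so scores are divided by
# sqrt(head_size * scale_factor) instead of sqrt(head_size).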
class TFDebertaV2DisentangledSelfAttention(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.num_attention_heads = config.num_attention_heads
        _attention_head_size = config.hidden_size // config.num_attention_heads
        self.attention_head_size = getattr(config, 'attention_head_size', _attention_head_size)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query_proj = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='query_proj', use_bias=True)
        self.key_proj = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='key_proj', use_bias=True)
        self.value_proj = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='value_proj', use_bias=True)
        self.share_att_key = getattr(config, 'share_att_key', False)
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
        self.relative_attention = getattr(config, 'relative_attention', False)
        if self.relative_attention:
            self.position_buckets = getattr(config, 'position_buckets', -1)
            self.max_relative_positions = getattr(config, 'max_relative_positions', -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.pos_ebd_size = self.max_relative_positions
            if self.position_buckets > 0:
                self.pos_ebd_size = self.position_buckets
            self.pos_dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name='pos_dropout')
            if not self.share_att_key:
                if 'c2p' in self.pos_att_type:
                    self.pos_key_proj = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='pos_proj', use_bias=True)
                if 'p2c' in self.pos_att_type:
                    self.pos_query_proj = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='pos_q_proj')
        self.softmax = TFDebertaV2XSoftmax(axis=-1)
        self.dropout = TFDebertaV2StableDropout(config.attention_probs_dropout_prob, name='dropout')
        self.config = config

    def transpose_for_scores(self, tensor: tf.Tensor, attention_heads: int) -> tf.Tensor:
        tensor_shape = shape_list(tensor)
        shape = tensor_shape[:-1] + [attention_heads, tensor_shape[-1] // attention_heads]
        tensor = tf.reshape(tensor=tensor, shape=shape)
        tensor = tf.transpose(tensor, perm=[0, 2, 1, 3])
        x_shape = shape_list(tensor)
        tensor = tf.reshape(tensor, shape=[-1, x_shape[-2], x_shape[-1]])
        return tensor

    def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, query_states: tf.Tensor=None, relative_pos: tf.Tensor=None, rel_embeddings: tf.Tensor=None, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:
        if query_states is None:
            query_states = hidden_states
        query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
        key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
        value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
        rel_att = None
        scale_factor = 1
        if 'c2p' in self.pos_att_type:
            scale_factor += 1
        if 'p2c' in self.pos_att_type:
            scale_factor += 1
        scale = tf.math.sqrt(tf.cast(shape_list(query_layer)[-1] * scale_factor, dtype=self.compute_dtype))
        attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 2, 1]) / scale)
        if self.relative_attention:
            rel_embeddings = self.pos_dropout(rel_embeddings)
            rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
        if rel_att is not None:
            attention_scores = attention_scores + rel_att
        attention_scores = tf.reshape(attention_scores, (-1, self.num_attention_heads, shape_list(attention_scores)[-2], shape_list(attention_scores)[-1]))
        attention_probs = self.softmax(attention_scores, attention_mask)
        attention_probs = self.dropout(attention_probs, training=training)
        context_layer = tf.matmul(tf.reshape(attention_probs, [-1, shape_list(attention_probs)[-2], shape_list(attention_probs)[-1]]), value_layer)
        context_layer = tf.transpose(tf.reshape(context_layer, [-1, self.num_attention_heads, shape_list(context_layer)[-2], shape_list(context_layer)[-1]]), [0, 2, 1, 3])
        context_layer_shape = shape_list(context_layer)
        new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
        context_layer = tf.reshape(context_layer, new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs

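    # Computes the c2p/p2c attention biases: relative positions are clipped to
    # [0, 2 * att_span - 1] and used to gather from the projected relative
    # embeddings (the shared query/key projections when `share_att_key` is set,
    # otherwise the dedicated pos_key_proj / pos_query_proj layers).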
    def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
        if relative_pos is None:
            q = shape_list(query_layer)[-2]
            relative_pos = build_relative_position(q, shape_list(key_layer)[-2], bucket_size=self.position_buckets, max_position=self.max_relative_positions)
        shape_list_pos = shape_list(relative_pos)
        if len(shape_list_pos) == 2:
            relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
        elif len(shape_list_pos) == 3:
            relative_pos = tf.expand_dims(relative_pos, 1)
        elif len(shape_list_pos) != 4:
            raise ValueError(f'Relative position ids must be of dim 2, 3 or 4. Got {len(shape_list_pos)}.')
        att_span = self.pos_ebd_size
        rel_embeddings = tf.expand_dims(rel_embeddings[self.pos_ebd_size - att_span:self.pos_ebd_size + att_span, :], 0)
        if self.share_att_key:
            pos_query_layer = tf.tile(self.transpose_for_scores(self.query_proj(rel_embeddings), self.num_attention_heads), [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1])
            pos_key_layer = tf.tile(self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads), [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1])
        else:
            if 'c2p' in self.pos_att_type:
                pos_key_layer = tf.tile(self.transpose_for_scores(self.pos_key_proj(rel_embeddings), self.num_attention_heads), [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1])
            if 'p2c' in self.pos_att_type:
                pos_query_layer = tf.tile(self.transpose_for_scores(self.pos_query_proj(rel_embeddings), self.num_attention_heads), [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1])
        score = 0
        if 'c2p' in self.pos_att_type:
            scale = tf.math.sqrt(tf.cast(shape_list(pos_key_layer)[-1] * scale_factor, dtype=self.compute_dtype))
            c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 2, 1]))
            c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
            c2p_att = take_along_axis(c2p_att, tf.broadcast_to(tf.squeeze(c2p_pos, 0), [shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(relative_pos)[-1]]))
            score += c2p_att / scale
        if 'p2c' in self.pos_att_type:
            scale = tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, dtype=self.compute_dtype))
            if shape_list(key_layer)[-2] != shape_list(query_layer)[-2]:
                r_pos = build_relative_position(shape_list(key_layer)[-2], shape_list(key_layer)[-2], bucket_size=self.position_buckets, max_position=self.max_relative_positions)
                r_pos = tf.expand_dims(r_pos, 0)
            else:
                r_pos = relative_pos
            p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)
            p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 2, 1]))
            p2c_att = tf.transpose(take_along_axis(p2c_att, tf.broadcast_to(tf.squeeze(p2c_pos, 0), [shape_list(query_layer)[0], shape_list(key_layer)[-2], shape_list(key_layer)[-2]])), [0, 2, 1])
            score += p2c_att / scale
        return score

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'query_proj', None) is not None:
            with tf.name_scope(self.query_proj.name):
                self.query_proj.build([None, None, self.config.hidden_size])
        if getattr(self, 'key_proj', None) is not None:
            with tf.name_scope(self.key_proj.name):
                self.key_proj.build([None, None, self.config.hidden_size])
        if getattr(self, 'value_proj', None) is not None:
            with tf.name_scope(self.value_proj.name):
                self.value_proj.build([None, None, self.config.hidden_size])
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        if getattr(self, 'pos_dropout', None) is not None:
            with tf.name_scope(self.pos_dropout.name):
                self.pos_dropout.build(None)
        if getattr(self, 'pos_key_proj', None) is not None:
            with tf.name_scope(self.pos_key_proj.name):
                self.pos_key_proj.build([None, None, self.config.hidden_size])
        if getattr(self, 'pos_query_proj', None) is not None:
            with tf.name_scope(self.pos_query_proj.name):
                self.pos_query_proj.build([None, None, self.config.hidden_size])

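# Embedding layer: word embeddings plus optional position and token-type embeddings
# (controlled by `position_biased_input` and `type_vocab_size`), projected to
# `hidden_size` via `embed_proj` when `embedding_size` differs, then LayerNorm,
# masking of padding positions and stable dropout.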
class TFDebertaV2Embeddings(keras.layers.Layer):

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.hidden_size = config.hidden_size
        self.max_position_embeddings = config.max_position_embeddings
        self.position_biased_input = getattr(config, 'position_biased_input', True)
        self.initializer_range = config.initializer_range
        if self.embedding_size != config.hidden_size:
            self.embed_proj = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='embed_proj', use_bias=False)
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name='dropout')

    def build(self, input_shape=None):
        with tf.name_scope('word_embeddings'):
            self.weight = self.add_weight(name='weight', shape=[self.config.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range))
        with tf.name_scope('token_type_embeddings'):
            if self.config.type_vocab_size > 0:
                self.token_type_embeddings = self.add_weight(name='embeddings', shape=[self.config.type_vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range))
            else:
                self.token_type_embeddings = None
        with tf.name_scope('position_embeddings'):
            if self.position_biased_input:
                self.position_embeddings = self.add_weight(name='embeddings', shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range))
            else:
                self.position_embeddings = None
        if self.built:
            return
        self.built = True
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        if getattr(self, 'embed_proj', None) is not None:
            with tf.name_scope(self.embed_proj.name):
                self.embed_proj.build([None, None, self.embedding_size])

    def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, token_type_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None, mask: tf.Tensor=None, training: bool=False) -> tf.Tensor:
        if input_ids is None and inputs_embeds is None:
            raise ValueError('Need to provide either `input_ids` or `inputs_embeds`.')
        if input_ids is not None:
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
        input_shape = shape_list(inputs_embeds)[:-1]
        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)
        if position_ids is None:
            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
        final_embeddings = inputs_embeds
        if self.position_biased_input:
            position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
            final_embeddings += position_embeds
        if self.config.type_vocab_size > 0:
            token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
            final_embeddings += token_type_embeds
        if self.embedding_size != self.hidden_size:
            final_embeddings = self.embed_proj(final_embeddings)
        final_embeddings = self.LayerNorm(final_embeddings)
        if mask is not None:
            if len(shape_list(mask)) != len(shape_list(final_embeddings)):
                if len(shape_list(mask)) == 4:
                    mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
                mask = tf.cast(tf.expand_dims(mask, axis=2), dtype=self.compute_dtype)
            final_embeddings = final_embeddings * mask
        final_embeddings = self.dropout(final_embeddings, training=training)
        return final_embeddings

class TFDebertaV2PredictionHeadTransform(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.dense = keras.layers.Dense(units=self.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm')
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, 'LayerNorm', None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.embedding_size])

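# MLM prediction head: a transform (dense + activation + LayerNorm) followed by a
# projection back onto the vocabulary that reuses the input embedding matrix
# (`input_embeddings.weight`, transposed) plus a learned output bias.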
class TFDebertaV2LMPredictionHead(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, input_embeddings: keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedding_size = getattr(config, 'embedding_size', config.hidden_size)
        self.transform = TFDebertaV2PredictionHeadTransform(config, name='transform')
        self.input_embeddings = input_embeddings

    def build(self, input_shape=None):
        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer='zeros', trainable=True, name='bias')
        if self.built:
            return
        self.built = True
        if getattr(self, 'transform', None) is not None:
            with tf.name_scope(self.transform.name):
                self.transform.build(None)

    def get_output_embeddings(self) -> keras.layers.Layer:
        return self.input_embeddings

    def set_output_embeddings(self, value: tf.Variable):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self) -> Dict[str, tf.Variable]:
        return {'bias': self.bias}

    def set_bias(self, value: tf.Variable):
        self.bias = value['bias']
        self.config.vocab_size = shape_list(value['bias'])[0]

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.transform(hidden_states=hidden_states)
        seq_length = shape_list(hidden_states)[1]
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
        hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
        return hidden_states

class TFDebertaV2OnlyMLMHead(keras.layers.Layer):

    def __init__(self, config: DebertaV2Config, input_embeddings: keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)
        self.predictions = TFDebertaV2LMPredictionHead(config, input_embeddings, name='predictions')

    def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
        prediction_scores = self.predictions(hidden_states=sequence_output)
        return prediction_scores

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'predictions', None) is not None:
            with tf.name_scope(self.predictions.name):
                self.predictions.build(None)

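# Core model: embeddings + encoder, with no pooling layer of its own; the task
# heads below add their own pooling (TFDebertaV2ContextPooler) where needed.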
class TFDebertaV2MainLayer(keras.layers.Layer):
    config_class = DebertaV2Config

    def __init__(self, config: DebertaV2Config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embeddings = TFDebertaV2Embeddings(config, name='embeddings')
        self.encoder = TFDebertaV2Encoder(config, name='encoder')

    def get_input_embeddings(self) -> keras.layers.Layer:
        return self.embeddings

    def set_input_embeddings(self, value: tf.Variable):
        self.embeddings.weight = value
        self.embeddings.vocab_size = shape_list(value)[0]

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @unpack_inputs
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        if attention_mask is None:
            attention_mask = tf.fill(dims=input_shape, value=1)
        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)
        embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, mask=attention_mask, training=training)
        encoder_outputs = self.encoder(hidden_states=embedding_output, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        return TFBaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'embeddings', None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, 'encoder', None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)

class TFDebertaV2PreTrainedModel(TFPreTrainedModel):
    config_class = DebertaV2Config
    base_model_prefix = 'deberta'
DEBERTA_START_DOCSTRING = '\n    The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled\n    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It\'s built\n    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two\n    improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.\n\n    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and\n    behavior.\n\n    \n\n    TensorFlow models and layers in `transformers` accept two formats as input:\n\n    - having all inputs as keyword arguments (like PyTorch models), or\n    - having all inputs as a list, tuple or dict in the first positional argument.\n\n    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n    positional argument:\n\n    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n    - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n    Note that when creating models and layers with\n    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n    about any of this, as you can just pass inputs like you would to any other Python function!\n\n    \n\n    Parameters:\n        config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
DEBERTA_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n            1]`:\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,\n            config.max_position_embeddings - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"

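# Hedged usage sketch (not part of the upstream module): illustrates the input
# formats described in DEBERTA_START_DOCSTRING. The checkpoint name below is an
# assumption chosen purely for illustration.
def _example_deberta_v2_input_formats():  # pragma: no cover - illustrative only
    from transformers import AutoTokenizer, TFDebertaV2Model

    tokenizer = AutoTokenizer.from_pretrained('microsoft/deberta-v3-base')  # assumed checkpoint
    model = TFDebertaV2Model.from_pretrained('microsoft/deberta-v3-base')
    enc = tokenizer('DeBERTa uses disentangled attention.', return_tensors='tf')
    # 1) all inputs as keyword arguments (like PyTorch models)
    out_kwargs = model(input_ids=enc['input_ids'], attention_mask=enc['attention_mask'])
    # 2) a dictionary in the first positional argument
    out_dict = model(dict(enc))
    # 3) a list of tensors, in the order given in the docstring
    out_list = model([enc['input_ids'], enc['attention_mask']])
    return (out_kwargs, out_dict, out_list)
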
@add_start_docstrings('The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.', DEBERTA_START_DOCSTRING)
class TFDebertaV2Model(TFDebertaV2PreTrainedModel):

    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.deberta = TFDebertaV2MainLayer(config, name='deberta')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        outputs = self.deberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deberta', None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)

@add_start_docstrings('DeBERTa Model with a `language modeling` head on top.', DEBERTA_START_DOCSTRING)
class TFDebertaV2ForMaskedLM(TFDebertaV2PreTrainedModel, TFMaskedLanguageModelingLoss):

    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        if config.is_decoder:
            logger.warning('If you want to use `TFDebertaV2ForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.')
        self.deberta = TFDebertaV2MainLayer(config, name='deberta')
        self.mlm = TFDebertaV2OnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name='cls')

    def get_lm_head(self) -> keras.layers.Layer:
        return self.mlm.predictions

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
        outputs = self.deberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TFMaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deberta', None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
        if getattr(self, 'mlm', None) is not None:
            with tf.name_scope(self.mlm.name):
                self.mlm.build(None)

@add_start_docstrings('\n    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n    pooled output) e.g. for GLUE tasks.\n    ', DEBERTA_START_DOCSTRING)
class TFDebertaV2ForSequenceClassification(TFDebertaV2PreTrainedModel, TFSequenceClassificationLoss):

    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.deberta = TFDebertaV2MainLayer(config, name='deberta')
        self.pooler = TFDebertaV2ContextPooler(config, name='pooler')
        drop_out = getattr(config, 'cls_dropout', None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = TFDebertaV2StableDropout(drop_out, name='cls_dropout')
        self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier')
        self.output_dim = self.pooler.output_dim

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        outputs = self.deberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        pooled_output = self.pooler(sequence_output, training=training)
        pooled_output = self.dropout(pooled_output, training=training)
        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deberta', None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
        if getattr(self, 'pooler', None) is not None:
            with tf.name_scope(self.pooler.name):
                self.pooler.build(None)
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.output_dim])

@add_start_docstrings('\n    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n    Named-Entity-Recognition (NER) tasks.\n    ', DEBERTA_START_DOCSTRING)
class TFDebertaV2ForTokenClassification(TFDebertaV2PreTrainedModel, TFTokenClassificationLoss):

    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.deberta = TFDebertaV2MainLayer(config, name='deberta')
        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.classifier = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
        outputs = self.deberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output, training=training)
        logits = self.classifier(inputs=sequence_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deberta', None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.hidden_size])

@add_start_docstrings('\n    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n    ', DEBERTA_START_DOCSTRING)
class TFDebertaV2ForQuestionAnswering(TFDebertaV2PreTrainedModel, TFQuestionAnsweringLoss):

    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.deberta = TFDebertaV2MainLayer(config, name='deberta')
        self.qa_outputs = keras.layers.Dense(units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, start_positions: np.ndarray | tf.Tensor | None=None, end_positions: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
        outputs = self.deberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        logits = self.qa_outputs(inputs=sequence_output)
        (start_logits, end_logits) = tf.split(value=logits, num_or_size_splits=2, axis=-1)
        start_logits = tf.squeeze(input=start_logits, axis=-1)
        end_logits = tf.squeeze(input=end_logits, axis=-1)
        loss = None
        if start_positions is not None and end_positions is not None:
            labels = {'start_position': start_positions}
            labels['end_position'] = end_positions
            loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deberta', None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
        if getattr(self, 'qa_outputs', None) is not None:
            with tf.name_scope(self.qa_outputs.name):
                self.qa_outputs.build([None, None, self.config.hidden_size])

@add_start_docstrings('\n    DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n    softmax) e.g. for RocStories/SWAG tasks.\n    ', DEBERTA_START_DOCSTRING)
class TFDebertaV2ForMultipleChoice(TFDebertaV2PreTrainedModel, TFMultipleChoiceLoss):

    def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.deberta = TFDebertaV2MainLayer(config, name='deberta')
        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.pooler = TFDebertaV2ContextPooler(config, name='pooler')
        self.classifier = keras.layers.Dense(units=1, kernel_initializer=get_initializer(config.initializer_range), name='classifier')
        self.output_dim = self.pooler.output_dim

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
        if input_ids is not None:
            num_choices = shape_list(input_ids)[1]
            seq_length = shape_list(input_ids)[2]
        else:
            num_choices = shape_list(inputs_embeds)[1]
            seq_length = shape_list(inputs_embeds)[2]
        flat_input_ids = tf.reshape(tensor=input_ids, shape=(-1, seq_length)) if input_ids is not None else None
        flat_attention_mask = tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
        flat_token_type_ids = tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
        flat_position_ids = tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
        flat_inputs_embeds = tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None
        outputs = self.deberta(input_ids=flat_input_ids, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        pooled_output = self.pooler(sequence_output, training=training)
        pooled_output = self.dropout(pooled_output, training=training)
        logits = self.classifier(pooled_output)
        reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TFMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deberta', None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
        if getattr(self, 'pooler', None) is not None:
            with tf.name_scope(self.pooler.name):
                self.pooler.build(None)
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.output_dim])

# File: transformers-main/src/transformers/models/deberta_v2/tokenization_deberta_v2.py
""""""
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as sp
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spm.model'}

class DebertaV2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(self, vocab_file, do_lower_case=False, split_by_punct=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        if not os.path.isfile(vocab_file):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_lower_case = do_lower_case
        self.split_by_punct = split_by_punct
        self.vocab_file = vocab_file
        self._tokenizer = SPMTokenizer(vocab_file, None, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs)
        unk_token = AddedToken(unk_token, normalized=True, special=True) if isinstance(unk_token, str) else unk_token
        super().__init__(do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self._tokenizer.special_tokens = self.all_special_tokens

    @property
    def vocab_size(self):
        return len(self.vocab)

    @property
    def vocab(self):
        return self._tokenizer.vocab

    def get_vocab(self):
        vocab = self.vocab.copy()
        vocab.update(self.get_added_vocab())
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        if self.do_lower_case:
            text = text.lower()
        return self._tokenizer.tokenize(text)

    def _convert_token_to_id(self, token):
        return self._tokenizer.spm.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self._tokenizer.spm.IdToPiece(index) if index < self.vocab_size else self.unk_token

    def convert_tokens_to_string(self, tokens):
        return self._tokenizer.decode(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
        return [1] + [0] * len(token_ids_0) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', False)
        if is_split_into_words or add_prefix_space:
            text = ' ' + text
        return (text, kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        return self._tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix)
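
# Illustrative sketch (not part of the original module): the [CLS] A [SEP] B [SEP] layout
# produced by the methods above for a sentence pair. 'spm.model' is a placeholder path to
# a local SentencePiece model file.
def _example_deberta_v2_pair_layout(vocab_file='spm.model'):
    tokenizer = DebertaV2Tokenizer(vocab_file)
    ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('first segment'))
    ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('second segment'))
    input_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
    token_type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
    # [CLS] A [SEP] belongs to segment 0, B [SEP] to segment 1.
    assert token_type_ids == [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids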

class SPMTokenizer:

    def __init__(self, vocab_file, special_tokens, split_by_punct=False, sp_model_kwargs: Optional[Dict[str, Any]]=None):
        self.split_by_punct = split_by_punct
        self.vocab_file = vocab_file
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        spm = sp.SentencePieceProcessor(**self.sp_model_kwargs)
        if not os.path.exists(vocab_file):
            raise FileNotFoundError(f'{vocab_file} does not exist!')
        spm.load(vocab_file)
        bpe_vocab_size = spm.GetPieceSize()
        self.vocab = {spm.IdToPiece(i): i for i in range(bpe_vocab_size)}
        self.ids_to_tokens = [spm.IdToPiece(i) for i in range(bpe_vocab_size)]
        self.spm = spm
        self.special_tokens = special_tokens

    def __getstate__(self):
        state = self.__dict__.copy()
        state['spm'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.spm = sp.SentencePieceProcessor(**self.sp_model_kwargs)
        self.spm.Load(self.vocab_file)

    def tokenize(self, text):
        return self._encode_as_pieces(text)

    def convert_ids_to_tokens(self, ids):
        return [self.ids_to_tokens[i] for i in ids]

    def decode(self, tokens, start=-1, end=-1, raw_text=None):
        # Without `raw_text`, detokenize by joining SentencePiece pieces and re-inserting
        # spaces around special tokens. With `raw_text`, map the given token span back to
        # the matching word span of the original text instead.
        if raw_text is None:
            current_sub_tokens = []
            out_string = ''
            prev_is_special = False
            for token in tokens:
                if token in self.special_tokens:
                    if not prev_is_special:
                        out_string += ' '
                    out_string += self.spm.decode_pieces(current_sub_tokens) + token
                    prev_is_special = True
                    current_sub_tokens = []
                else:
                    current_sub_tokens.append(token)
                    prev_is_special = False
            out_string += self.spm.decode_pieces(current_sub_tokens)
            return out_string.strip()
        else:
            words = self.split_to_words(raw_text)
            word_tokens = [self.tokenize(w) for w in words]
            token2words = [0] * len(tokens)
            tid = 0
            for (i, w) in enumerate(word_tokens):
                for (k, t) in enumerate(w):
                    token2words[tid] = i
                    tid += 1
            word_start = token2words[start]
            word_end = token2words[end] if end < len(tokens) else len(words)
            text = ''.join(words[word_start:word_end])
            return text

    def add_special_token(self, token):
        if token not in self.special_tokens:
            self.special_tokens.append(token)
            if token not in self.vocab:
                self.vocab[token] = len(self.vocab) - 1
                self.ids_to_tokens.append(token)
        return self.id(token)

    def part_of_whole_word(self, token, is_bos=False):
        logger.warning_once('The `DebertaTokenizer.part_of_whole_word` method is deprecated and will be removed in `transformers==4.35`')
        if is_bos:
            return True
        if (len(token) == 1 and (_is_whitespace(list(token)[0]) or _is_control(list(token)[0]) or _is_punctuation(list(token)[0]))) or token in self.special_tokens:
            return False
        word_start = b'\xe2\x96\x81'.decode('utf-8')  # '▁' (U+2581), the SentencePiece word-boundary marker
        return not token.startswith(word_start)

    def pad(self):
        return '[PAD]'

    def bos(self):
        return '[CLS]'

    def eos(self):
        return '[SEP]'

    def unk(self):
        return '[UNK]'

    def mask(self):
        return '[MASK]'

    def sym(self, id):
        return self.ids_to_tokens[id]

    def id(self, sym):
        logger.warning_once('The `DebertaTokenizer.id` method is deprecated and will be removed in `transformers==4.35`')
        return self.vocab[sym] if sym in self.vocab else 1

    def _encode_as_pieces(self, text):
        text = convert_to_unicode(text)
        if self.split_by_punct:
            words = self._run_split_on_punc(text)
            pieces = [self.spm.encode(w, out_type=str) for w in words]
            return [p for w in pieces for p in w]
        else:
            return self.spm.encode(text, out_type=str)

    def split_to_words(self, text):
        pieces = self._encode_as_pieces(text)
        word_start = b'\xe2\x96\x81'.decode('utf-8')  # '▁' (U+2581), the SentencePiece word-boundary marker
        words = []
        offset = 0
        prev_end = 0
        for (i, p) in enumerate(pieces):
            if p.startswith(word_start):
                if offset > prev_end:
                    words.append(text[prev_end:offset])
                prev_end = offset
                w = p.replace(word_start, '')
            else:
                w = p
            try:
                s = text.index(w, offset)
                pn = ''
                k = i + 1
                while k < len(pieces):
                    pn = pieces[k].replace(word_start, '')
                    if len(pn) > 0:
                        break
                    k += 1
                if len(pn) > 0 and pn in text[offset:s]:
                    offset = offset + 1
                else:
                    offset = s + len(w)
            except Exception:
                offset = offset + 1
        if prev_end < offset:
            words.append(text[prev_end:offset])
        return words

    def _run_split_on_punc(self, text):
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return [''.join(x) for x in output]

    def save_pretrained(self, path: str, filename_prefix: str=None):
        filename = VOCAB_FILES_NAMES[list(VOCAB_FILES_NAMES.keys())[0]]
        if filename_prefix is not None:
            filename = filename_prefix + '-' + filename
        full_path = os.path.join(path, filename)
        with open(full_path, 'wb') as fs:
            fs.write(self.spm.serialized_model_proto())
        return (full_path,)
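
# Illustrative sketch (not part of the original module): the effect of `split_by_punct`.
# With it enabled, the text is first split on punctuation (see `_run_split_on_punc`) and
# each chunk is encoded separately by SentencePiece. 'spm.model' is a placeholder path.
def _example_spm_split_by_punct(vocab_file='spm.model'):
    plain = SPMTokenizer(vocab_file, special_tokens=[])
    punct = SPMTokenizer(vocab_file, special_tokens=[], split_by_punct=True)
    return plain.tokenize('Hello, world!'), punct.tokenize('Hello, world!')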

def _is_whitespace(char):
    if char in (' ', '\t', '\n', '\r'):
        return True
    cat = unicodedata.category(char)
    if cat == 'Zs':
        return True
    return False

def _is_control(char):
    if char == '\t' or char == '\n' or char == '\r':
        return False
    cat = unicodedata.category(char)
    if cat.startswith('C'):
        return True
    return False

def _is_punctuation(char):
    cp = ord(char)
    if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('P'):
        return True
    return False

def convert_to_unicode(text):
    if isinstance(text, str):
        return text
    elif isinstance(text, bytes):
        return text.decode('utf-8', 'ignore')
    else:
        raise TypeError(f'Unsupported string type: {type(text)}')
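
# Illustrative sketch (not part of the original module): a few concrete cases for the
# character classifiers above, which follow the usual BERT-style conventions.
def _example_char_classes():
    assert _is_whitespace(' ') and _is_whitespace('\u00a0')    # category Zs counts as whitespace
    assert _is_control('\x00') and not _is_control('\n')       # \t, \n and \r are deliberately excluded
    assert _is_punctuation('.') and not _is_punctuation('a')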

# File: transformers-main/src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py
""""""
import os
from shutil import copyfile
from typing import Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if is_sentencepiece_available():
    from .tokenization_deberta_v2 import DebertaV2Tokenizer
else:
    DebertaV2Tokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spm.model', 'tokenizer_file': 'tokenizer.json'}

class DebertaV2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = DebertaV2Tokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, split_by_punct=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs) -> None:
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, split_by_punct=split_by_punct, **kwargs)
        self.do_lower_case = do_lower_case
        self.split_by_punct = split_by_punct
        self.vocab_file = vocab_file

    @property
    def can_save_slow_tokenizer(self) -> bool:
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
        return [1] + [0] * len(token_ids_0) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
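
# Illustrative sketch (not part of the original module): loading the fast tokenizer from
# local files and re-exporting the slow-tokenizer vocabulary. 'spm.model',
# 'tokenizer.json' and 'exported-tokenizer' are placeholder paths.
def _example_deberta_v2_fast(vocab_file='spm.model', tokenizer_file='tokenizer.json'):
    tokenizer = DebertaV2TokenizerFast(vocab_file=vocab_file, tokenizer_file=tokenizer_file)
    if tokenizer.can_save_slow_tokenizer:
        os.makedirs('exported-tokenizer', exist_ok=True)
        # Copies spm.model into the target directory (see save_vocabulary above).
        tokenizer.save_vocabulary('exported-tokenizer')
    return tokenizer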

# File: transformers-main/src/transformers/models/decision_transformer/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_decision_transformer': ['DecisionTransformerConfig']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_decision_transformer'] = ['DecisionTransformerGPT2Model', 'DecisionTransformerGPT2PreTrainedModel', 'DecisionTransformerModel', 'DecisionTransformerPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_decision_transformer import DecisionTransformerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_decision_transformer import DecisionTransformerGPT2Model, DecisionTransformerGPT2PreTrainedModel, DecisionTransformerModel, DecisionTransformerPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/decision_transformer/configuration_decision_transformer.py
""""""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class DecisionTransformerConfig(PretrainedConfig):
    model_type = 'decision_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}

    def __init__(self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function='relu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
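
# Illustrative sketch (not part of the original module): the config doubles as the GPT-2
# config of the internal encoder, so the generic names in `attribute_map` resolve to the
# GPT-2 style fields. Hypothetical hyper-parameter values below.
def _example_decision_transformer_config():
    cfg = DecisionTransformerConfig(state_dim=11, act_dim=3, hidden_size=128, n_layer=3, n_head=1)
    assert cfg.num_hidden_layers == cfg.n_layer == 3
    assert cfg.num_attention_heads == cfg.n_head == 1
    assert cfg.max_position_embeddings == cfg.n_positions == 1024
    return cfg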

# File: transformers-main/src/transformers/models/decision_transformer/modeling_decision_transformer.py
""""""
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_decision_transformer import DecisionTransformerConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = 'edbeeching/decision-transformer-gym-hopper-medium'
_CONFIG_FOR_DOC = 'DecisionTransformerConfig'

def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    try:
        import re
        import tensorflow as tf
    except ImportError:
        logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.')
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info(f'Converting TensorFlow checkpoint from {tf_path}')
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for (name, shape) in init_vars:
        logger.info(f'Loading TF weight {name} with shape {shape}')
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())
    for (name, array) in zip(names, arrays):
        name = name[6:]  # drop the leading 'model/' prefix of the TF variable name
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch('[A-Za-z]+\\d+', m_name):
                scope_names = re.split('(\\d+)', m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == 'w' or scope_names[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif scope_names[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif scope_names[0] == 'wpe' or scope_names[0] == 'wte':
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        try:
            if pointer.shape != array.shape:
                raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched')
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f'Initialize PyTorch weight {name}')
        pointer.data = torch.from_numpy(array)
    return model
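
# Illustrative sketch (not part of the original module): how load_tf_weights_in_gpt2 parses
# a TF scope segment such as 'h3' (hypothetical name) into an attribute plus a ModuleList
# index, mirroring the loop above.
def _example_tf_scope_parsing(m_name='h3'):
    import re
    assert re.fullmatch('[A-Za-z]+\\d+', m_name) is not None
    scope_names = re.split('(\\d+)', m_name)   # -> ['h', '3', '']
    assert scope_names[0] == 'h' and int(scope_names[1]) == 3
    # 'w' and 'g' map to `.weight`, 'b' maps to `.bias` in the loop above.
    return scope_names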

class DecisionTransformerGPT2Attention(nn.Module):

    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__()
        self.config = config
        max_positions = config.max_position_embeddings
        self.register_buffer('bias', torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(1, 1, max_positions, max_positions), persistent=False)
        self.register_buffer('masked_bias', torch.tensor(-10000.0), persistent=False)
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.split_size = self.embed_dim
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(f'`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
        self.scale_attn_weights = config.scale_attn_weights
        self.is_cross_attention = is_cross_attention
        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
        self.layer_idx = layer_idx
        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
        if self.is_cross_attention:
            self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
        else:
            self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.is_causal = True
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        (heads, index) = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
        index_attn = torch.cat([index, index + self.split_size, index + 2 * self.split_size])
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        self.split_size = self.split_size // self.num_heads * (self.num_heads - len(heads))
        self.num_heads = self.num_heads - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        attn_weights = torch.matmul(query, key.transpose(-1, -2))
        if self.scale_attn_weights:
            attn_weights = attn_weights / torch.full([], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device)
        if self.scale_attn_by_inverse_layer_idx:
            attn_weights = attn_weights / float(self.layer_idx + 1)
        if not self.is_cross_attention:
            (query_length, key_length) = (query.size(-2), key.size(-2))
            # Slice the precomputed lower-triangular mask so keys cached in `layer_past` stay visible to every query.
            causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value)
        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)
        if head_mask is not None:
            attn_weights = attn_weights * head_mask
        attn_output = torch.matmul(attn_weights, value)
        return (attn_output, attn_weights)

    def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
        (bsz, num_heads, q_seq_len, dk) = query.size()
        (_, _, k_seq_len, _) = key.size()
        attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
        scale_factor = 1.0
        if self.scale_attn_weights:
            scale_factor /= float(value.size(-1)) ** 0.5
        if self.scale_attn_by_inverse_layer_idx:
            scale_factor /= float(self.layer_idx + 1)
        with torch.amp.autocast(query.device.type, enabled=False):
            (q, k) = (query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len))
            attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
            attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
        if not self.is_cross_attention:
            (query_length, key_length) = (query.size(-2), key.size(-2))
            causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)
        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if attn_weights.dtype != torch.float32:
            raise RuntimeError('Error with upcasting, attn_weights does not have dtype torch.float32')
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)
        if head_mask is not None:
            attn_weights = attn_weights * head_mask
        attn_output = torch.matmul(attn_weights, value)
        return (attn_output, attn_weights)

    def _split_heads(self, tensor, num_heads, attn_head_size):
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        return tensor.permute(0, 2, 1, 3)

    def _merge_heads(self, tensor, num_heads, attn_head_size):
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        if encoder_hidden_states is not None:
            if not hasattr(self, 'q_attn'):
                raise ValueError('If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `DecisionTransformerGPT2Attention(..., is_cross_attention=True)`.')
            query = self.q_attn(hidden_states)
            (key, value) = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
            attention_mask = encoder_attention_mask
        else:
            (query, key, value) = self.c_attn(hidden_states).split(self.split_size, dim=2)
        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)
        if layer_past is not None:
            (past_key, past_value) = layer_past
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)
        if use_cache is True:
            present = (key, value)
        else:
            present = None
        if self.reorder_and_upcast_attn:
            (attn_output, attn_weights) = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
        else:
            (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask)
        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)
        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
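
# Illustrative sketch (not part of the original module): the head split/merge used by the
# attention class above is a pure reshape round-trip. Hypothetical sizes below.
def _example_split_merge_heads(batch=2, seq=5, num_heads=4, head_dim=8):
    import torch
    hidden = torch.randn(batch, seq, num_heads * head_dim)
    # _split_heads: (batch, seq, embed_dim) -> (batch, num_heads, seq, head_dim)
    split = hidden.view(batch, seq, num_heads, head_dim).permute(0, 2, 1, 3)
    # _merge_heads: inverse permutation, then flatten the head dimensions back together
    merged = split.permute(0, 2, 1, 3).contiguous().view(batch, seq, num_heads * head_dim)
    assert torch.equal(hidden, merged)
    return split.shape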

class DecisionTransformerGPT2MLP(nn.Module):

    def __init__(self, intermediate_size, config):
        super().__init__()
        embed_dim = config.hidden_size
        self.c_fc = Conv1D(intermediate_size, embed_dim)
        self.c_proj = Conv1D(embed_dim, intermediate_size)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states

class DecisionTransformerGPT2Block(nn.Module):

    def __init__(self, config, layer_idx=None):
        super().__init__()
        hidden_size = config.hidden_size
        inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = DecisionTransformerGPT2Attention(config, layer_idx=layer_idx)
        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        if config.add_cross_attention:
            self.crossattention = DecisionTransformerGPT2Attention(config, is_cross_attention=True, layer_idx=layer_idx)
            self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.mlp = DecisionTransformerGPT2MLP(inner_dim, config)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions)
        attn_output = attn_outputs[0]
        outputs = attn_outputs[1:]
        hidden_states = attn_output + residual
        if encoder_hidden_states is not None:
            if not hasattr(self, 'crossattention'):
                raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
            residual = hidden_states
            hidden_states = self.ln_cross_attn(hidden_states)
            cross_attn_outputs = self.crossattention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
            attn_output = cross_attn_outputs[0]
            hidden_states = residual + attn_output
            outputs = outputs + cross_attn_outputs[2:]
        residual = hidden_states
        hidden_states = self.ln_2(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        hidden_states = residual + feed_forward_hidden_states
        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]
        return outputs

class DecisionTransformerGPT2PreTrainedModel(PreTrainedModel):
    config_class = DecisionTransformerConfig
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = 'transformer'
    is_parallelizable = True
    supports_gradient_checkpointing = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, Conv1D)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        for (name, p) in module.named_parameters():
            if 'c_proj' in name and 'weight' in name:
                p.data.normal_(mean=0.0, std=self.config.initializer_range / math.sqrt(2 * self.config.n_layer))

class DecisionTransformerGPT2Model(DecisionTransformerGPT2PreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.embed_dim = config.hidden_size
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([DecisionTransformerGPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError('batch_size has to be defined and > 0')
            # Broadcast the (batch, seq) mask to (batch, 1, 1, seq) so it can be added to the attention scores.
            attention_mask = attention_mask.view(batch_size, -1)
            attention_mask = attention_mask[:, None, None, :]
            attention_mask = attention_mask.to(dtype=self.dtype)
            # Turn the 1/0 mask into an additive bias: 0 for positions to keep, a large negative value for masked positions.
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds
        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds
        hidden_states = self.drop(hidden_states)
        output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
                use_cache = False
        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        for (i, (block, layer_past)) in enumerate(zip(self.h, past_key_values)):
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                if layer_past is not None:
                    layer_past = tuple((past_state.to(hidden_states.device) for past_state in layer_past))
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions)
            else:
                outputs = block(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions)
            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
            if self.model_parallel:
                for (k, v) in self.device_map.items():
                    if i == v[-1] and 'cuda:' + str(k) != self.last_device:
                        hidden_states = hidden_states.to('cuda:' + str(k + 1))
        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(output_shape)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
        return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)

@dataclass
class DecisionTransformerOutput(ModelOutput):
    state_preds: torch.FloatTensor = None
    action_preds: torch.FloatTensor = None
    return_preds: torch.FloatTensor = None
    hidden_states: torch.FloatTensor = None
    attentions: torch.FloatTensor = None
    last_hidden_state: torch.FloatTensor = None

class DecisionTransformerPreTrainedModel(PreTrainedModel):
    config_class = DecisionTransformerConfig
    base_model_prefix = 'decision_transformer'
    main_input_name = 'states'
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
DECISION_TRANSFORMER_START_DOCSTRING = '\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`~DecisionTransformerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
DECISION_TRANSFORMER_INPUTS_DOCSTRING = '\n    Args:\n        states (`torch.FloatTensor` of shape `(batch_size, episode_length, state_dim)`):\n            The states for each step in the trajectory\n        actions (`torch.FloatTensor` of shape `(batch_size, episode_length, act_dim)`):\n            The actions taken by the "expert" policy for the current state, these are masked for auto regressive\n            prediction\n        rewards (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):\n            The rewards for each state, action\n        returns_to_go (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):\n            The returns for each state in the trajectory\n        timesteps (`torch.LongTensor` of shape `(batch_size, episode_length)`):\n            The timestep for each step in the trajectory\n        attention_mask (`torch.FloatTensor` of shape `(batch_size, episode_length)`):\n            Masking, used to mask the actions when performing autoregressive prediction\n'

@add_start_docstrings('The Decision Transformer Model', DECISION_TRANSFORMER_START_DOCSTRING)
class DecisionTransformerModel(DecisionTransformerPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.hidden_size = config.hidden_size
        self.encoder = DecisionTransformerGPT2Model(config)
        self.embed_timestep = nn.Embedding(config.max_ep_len, config.hidden_size)
        self.embed_return = torch.nn.Linear(1, config.hidden_size)
        self.embed_state = torch.nn.Linear(config.state_dim, config.hidden_size)
        self.embed_action = torch.nn.Linear(config.act_dim, config.hidden_size)
        self.embed_ln = nn.LayerNorm(config.hidden_size)
        self.predict_state = torch.nn.Linear(config.hidden_size, config.state_dim)
        self.predict_action = nn.Sequential(*[nn.Linear(config.hidden_size, config.act_dim)] + ([nn.Tanh()] if config.action_tanh else []))
        self.predict_return = torch.nn.Linear(config.hidden_size, 1)
        self.post_init()

    @add_start_docstrings_to_model_forward(DECISION_TRANSFORMER_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=DecisionTransformerOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, states: Optional[torch.FloatTensor]=None, actions: Optional[torch.FloatTensor]=None, rewards: Optional[torch.FloatTensor]=None, returns_to_go: Optional[torch.FloatTensor]=None, timesteps: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], DecisionTransformerOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        (batch_size, seq_length) = (states.shape[0], states.shape[1])
        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)
        state_embeddings = self.embed_state(states)
        action_embeddings = self.embed_action(actions)
        returns_embeddings = self.embed_return(returns_to_go)
        time_embeddings = self.embed_timestep(timesteps)
        state_embeddings = state_embeddings + time_embeddings
        action_embeddings = action_embeddings + time_embeddings
        returns_embeddings = returns_embeddings + time_embeddings
        # Interleave the modalities so the sequence reads (R_1, s_1, a_1, R_2, s_2, a_2, ...),
        # letting the autoregressive GPT-2 backbone attend across timesteps and modalities.
        stacked_inputs = torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1).permute(0, 2, 1, 3).reshape(batch_size, 3 * seq_length, self.hidden_size)
        stacked_inputs = self.embed_ln(stacked_inputs)
        # Repeat the mask for the three interleaved tokens of each timestep.
        stacked_attention_mask = torch.stack((attention_mask, attention_mask, attention_mask), dim=1).permute(0, 2, 1).reshape(batch_size, 3 * seq_length)
        device = stacked_inputs.device
        # Positional information comes from the timestep embeddings above, so the GPT-2 position ids are zeroed out.
        encoder_outputs = self.encoder(inputs_embeds=stacked_inputs, attention_mask=stacked_attention_mask, position_ids=torch.zeros(stacked_attention_mask.shape, device=device, dtype=torch.long), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        x = encoder_outputs[0]
        # Un-interleave: x[:, 0], x[:, 1], x[:, 2] are the hidden states of the return, state and action tokens.
        x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)
        return_preds = self.predict_return(x[:, 2])  # predict next return given state and action
        state_preds = self.predict_state(x[:, 2])  # predict next state given state and action
        action_preds = self.predict_action(x[:, 1])  # predict next action given state
        if not return_dict:
            return (state_preds, action_preds, return_preds)
        return DecisionTransformerOutput(last_hidden_state=encoder_outputs.last_hidden_state, state_preds=state_preds, action_preds=action_preds, return_preds=return_preds, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
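
# Illustrative sketch (not part of the original module): a dummy forward pass through the
# model above with hypothetical dimensions, showing the expected input and output shapes.
def _example_decision_transformer_forward():
    import torch
    config = DecisionTransformerConfig(state_dim=11, act_dim=3, hidden_size=64, n_layer=2, n_head=1)
    model = DecisionTransformerModel(config).eval()
    batch, horizon = 2, 10
    states = torch.randn(batch, horizon, config.state_dim)
    actions = torch.randn(batch, horizon, config.act_dim)
    rewards = torch.randn(batch, horizon, 1)          # accepted by forward() but not used
    returns_to_go = torch.randn(batch, horizon, 1)
    timesteps = torch.arange(horizon).unsqueeze(0).repeat(batch, 1)
    with torch.no_grad():
        state_preds, action_preds, return_preds = model(states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, return_dict=False)
    assert action_preds.shape == (batch, horizon, config.act_dim)
    return state_preds.shape, action_preds.shape, return_preds.shape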

# File: transformers-main/src/transformers/models/deformable_detr/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_deformable_detr': ['DeformableDetrConfig']}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_deformable_detr'] = ['DeformableDetrFeatureExtractor']
    _import_structure['image_processing_deformable_detr'] = ['DeformableDetrImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_deformable_detr'] = ['DeformableDetrForObjectDetection', 'DeformableDetrModel', 'DeformableDetrPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_deformable_detr import DeformableDetrConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deformable_detr import DeformableDetrFeatureExtractor
        from .image_processing_deformable_detr import DeformableDetrImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deformable_detr import DeformableDetrForObjectDetection, DeformableDetrModel, DeformableDetrPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/deformable_detr/configuration_deformable_detr.py
""""""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

class DeformableDetrConfig(PretrainedConfig):
    model_type = 'deformable_detr'
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads'}

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type='sine', backbone='resnet50', use_pretrained_backbone=True, backbone_kwargs=None, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs):
        if use_timm_backbone and backbone_kwargs is None:
            backbone_kwargs = {}
            if dilation:
                backbone_kwargs['output_stride'] = 16
            backbone_kwargs['out_indices'] = [2, 3, 4] if num_feature_levels > 1 else [4]
            backbone_kwargs['in_chans'] = num_channels
        elif not use_timm_backbone and backbone in (None, 'resnet50'):
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.backbone_kwargs = backbone_kwargs
        self.dilation = dilation
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
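
# Illustrative sketch (not part of the original module): how the constructor above derives
# `backbone_kwargs` for a timm backbone from `dilation`, `num_feature_levels` and
# `num_channels` when none are passed explicitly.
def _example_deformable_detr_timm_kwargs():
    config = DeformableDetrConfig(use_timm_backbone=True, dilation=True, num_feature_levels=4)
    # output_stride=16 comes from dilation=True; multi-scale features use backbone stages 2-4.
    assert config.backbone_kwargs == {'output_stride': 16, 'out_indices': [2, 3, 4], 'in_chans': 3}
    return config.backbone_kwargs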

# File: transformers-main/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py
""""""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeformableDetrConfig, DeformableDetrForObjectDetection, DeformableDetrImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

def rename_key(orig_key):
    if 'backbone.0.body' in orig_key:
        orig_key = orig_key.replace('backbone.0.body', 'backbone.conv_encoder.model')
    if 'transformer' in orig_key:
        orig_key = orig_key.replace('transformer.', '')
    if 'norm1' in orig_key:
        if 'encoder' in orig_key:
            orig_key = orig_key.replace('norm1', 'self_attn_layer_norm')
        else:
            orig_key = orig_key.replace('norm1', 'encoder_attn_layer_norm')
    if 'norm2' in orig_key:
        if 'encoder' in orig_key:
            orig_key = orig_key.replace('norm2', 'final_layer_norm')
        else:
            orig_key = orig_key.replace('norm2', 'self_attn_layer_norm')
    if 'norm3' in orig_key:
        orig_key = orig_key.replace('norm3', 'final_layer_norm')
    if 'linear1' in orig_key:
        orig_key = orig_key.replace('linear1', 'fc1')
    if 'linear2' in orig_key:
        orig_key = orig_key.replace('linear2', 'fc2')
    if 'query_embed' in orig_key:
        orig_key = orig_key.replace('query_embed', 'query_position_embeddings')
    if 'cross_attn' in orig_key:
        orig_key = orig_key.replace('cross_attn', 'encoder_attn')
    return orig_key
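
# Illustrative sketch (not part of the original file): a few hypothetical checkpoint keys
# run through rename_key above, showing the encoder/decoder asymmetry for 'norm1'.
def _example_rename_key():
    assert rename_key('transformer.encoder.layers.0.norm1.weight') == 'encoder.layers.0.self_attn_layer_norm.weight'
    assert rename_key('transformer.decoder.layers.0.norm1.weight') == 'decoder.layers.0.encoder_attn_layer_norm.weight'
    assert rename_key('backbone.0.body.conv1.weight') == 'backbone.conv_encoder.model.conv1.weight'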

def read_in_q_k_v(state_dict):
    # Each decoder self-attention `in_proj` packs query/key/value for d_model = 256:
    # rows [0:256] -> q_proj, [256:512] -> k_proj, [512:768] -> v_proj.
    for i in range(6):
        in_proj_weight = state_dict.pop(f'decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'decoder.layers.{i}.self_attn.in_proj_bias')
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]

def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im

@torch.no_grad()
def convert_deformable_detr_checkpoint(checkpoint_path, single_scale, dilation, with_box_refine, two_stage, pytorch_dump_folder_path, push_to_hub):
    config = DeformableDetrConfig()
    if single_scale:
        config.num_feature_levels = 1
    config.dilation = dilation
    config.with_box_refine = with_box_refine
    config.two_stage = two_stage
    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type='dataset')).read_text())
    id2label = {int(k): v for (k, v) in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for (k, v) in id2label.items()}
    image_processor = DeformableDetrImageProcessor(format='coco_detection')
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    logger.info('Converting model...')
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    read_in_q_k_v(state_dict)
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_embed') and not key.startswith('bbox_embed'):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    model = DeformableDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    outputs = model(pixel_values.to(device))
    expected_logits = torch.tensor([[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]])
    expected_boxes = torch.tensor([[0.8693, 0.2289, 0.2492], [0.315, 0.5489, 0.5845], [0.5563, 0.758, 0.8518]])
    if single_scale:
        expected_logits = torch.tensor([[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.847, -7.7003]])
        expected_boxes = torch.tensor([[0.7292, 0.4991, 0.5532], [0.7959, 0.2426, 0.4236], [0.7582, 0.3518, 0.4451]])
    if single_scale and dilation:
        expected_logits = torch.tensor([[-8.9652, -4.1074, -5.6635], [-9.0596, -4.9447, -6.6075], [-10.1178, -4.5275, -6.2671]])
        expected_boxes = torch.tensor([[0.7665, 0.413, 0.4769], [0.8364, 0.1841, 0.3391], [0.6261, 0.3895, 0.7978]])
    if with_box_refine:
        expected_logits = torch.tensor([[-8.8895, -5.4187, -6.8153], [-8.4706, -6.1668, -7.6184], [-9.0042, -5.5359, -6.9141]])
        expected_boxes = torch.tensor([[0.7828, 0.2208, 0.4323], [0.0892, 0.5996, 0.1319], [0.5524, 0.6389, 0.8914]])
    if with_box_refine and two_stage:
        expected_logits = torch.tensor([[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.724], [-6.9315, -4.4735, -6.2298]])
        expected_boxes = torch.tensor([[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.549, 0.2763, 0.0564]])
    print('Logits:', outputs.logits[0, :3, :3])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=0.0001)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=0.0001)
    print('Everything ok!')
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_name = 'deformable-detr'
        model_name += '-single-scale' if single_scale else ''
        model_name += '-dc5' if dilation else ''
        model_name += '-with-box-refine' if with_box_refine else ''
        model_name += '-two-stage' if two_stage else ''
        print('Pushing model to hub...')
        model.push_to_hub(repo_path_or_name=model_name, organization='nielsr', commit_message='Add model')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, default='/home/niels/checkpoints/deformable_detr/r50_deformable_detr-checkpoint.pth', help="Path to PyTorch checkpoint (.pth file) you'd like to convert.")
    parser.add_argument('--single_scale', action='store_true', help='Whether to set config.num_feature_levels = 1.')
    parser.add_argument('--dilation', action='store_true', help='Whether to set config.dilation=True.')
    parser.add_argument('--with_box_refine', action='store_true', help='Whether to set config.with_box_refine=True.')
    parser.add_argument('--two_stage', action='store_true', help='Whether to set config.two_stage=True.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the folder to output PyTorch model.')
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.')
    args = parser.parse_args()
    convert_deformable_detr_checkpoint(args.checkpoint_path, args.single_scale, args.dilation, args.with_box_refine, args.two_stage, args.pytorch_dump_folder_path, args.push_to_hub)
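
# Example invocation (editor's addition; paths are placeholders):
#   python convert_deformable_detr_to_pytorch.py \
#       --checkpoint_path /path/to/r50_deformable_detr-checkpoint.pth \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --with_box_refine --two_stage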

# File: transformers-main/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py
""""""
import warnings
from ...image_transforms import rgb_to_id as _rgb_to_id
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)

def rgb_to_id(x):
    warnings.warn('rgb_to_id has moved and will not be importable from this module from v5. Please import from transformers.image_transforms instead.', FutureWarning)
    return _rgb_to_id(x)

class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn('The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use DeformableDetrImageProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs)

# File: transformers-main/src/transformers/models/deformable_detr/image_processing_deformable_detr.py
""""""
import io
import pathlib
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_processing_utils import BaseImageProcessor, get_size_dict
from ...image_transforms import PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, pad, rescale, resize, rgb_to_id, to_channel_dimension_format
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, AnnotationType, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_annotations, validate_kwargs, validate_preprocess_arguments
from ...utils import TensorType, is_flax_available, is_jax_tensor, is_scipy_available, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_torch_available():
    import torch
    from torch import nn
if is_vision_available():
    import PIL
if is_scipy_available():
    import scipy.special
    import scipy.stats
logger = logging.get_logger(__name__)
SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)

def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
    (height, width) = image_size
    raw_size = None
    if max_size is not None:
        min_original_size = float(min((height, width)))
        max_original_size = float(max((height, width)))
        if max_original_size / min_original_size * size > max_size:
            raw_size = max_size * min_original_size / max_original_size
            size = int(round(raw_size))
    if (height <= width and height == size) or (width <= height and width == size):
        (oh, ow) = (height, width)
    elif width < height:
        ow = size
        if max_size is not None and raw_size is not None:
            oh = int(raw_size * height / width)
        else:
            oh = int(size * height / width)
    else:
        oh = size
        if max_size is not None and raw_size is not None:
            ow = int(raw_size * width / height)
        else:
            ow = int(size * width / height)
    return (oh, ow)
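
# Hedged worked example (editor's addition): with the processor defaults
# (shortest_edge=800, longest_edge=1333), a 480x640 image is scaled so its short side
# becomes 800, while a very wide 480x1280 image is instead capped by max_size.
def _example_get_size_with_aspect_ratio():
    assert get_size_with_aspect_ratio((480, 640), size=800, max_size=1333) == (800, 1066)
    assert get_size_with_aspect_ratio((480, 1280), size=800, max_size=1333) == (500, 1333)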

def get_resize_output_image_size(input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:
    image_size = get_image_size(input_image, input_data_format)
    if isinstance(size, (list, tuple)):
        return size
    return get_size_with_aspect_ratio(image_size, size, max_size)

def get_image_size_for_max_height_width(input_image: np.ndarray, max_height: int, max_width: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:
    image_size = get_image_size(input_image, input_data_format)
    (height, width) = image_size
    height_scale = max_height / height
    width_scale = max_width / width
    min_scale = min(height_scale, width_scale)
    new_height = int(height * min_scale)
    new_width = int(width * min_scale)
    return (new_height, new_width)

def get_numpy_to_framework_fn(arr) -> Callable:
    if isinstance(arr, np.ndarray):
        return np.array
    if is_tf_available() and is_tf_tensor(arr):
        import tensorflow as tf
        return tf.convert_to_tensor
    if is_torch_available() and is_torch_tensor(arr):
        import torch
        return torch.tensor
    if is_flax_available() and is_jax_tensor(arr):
        import jax.numpy as jnp
        return jnp.array
    raise ValueError(f'Cannot convert arrays of type {type(arr)}')

def safe_squeeze(arr: np.ndarray, axis: Optional[int]=None) -> np.ndarray:
    if axis is None:
        return arr.squeeze()
    try:
        return arr.squeeze(axis=axis)
    except ValueError:
        return arr

def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
    (image_height, image_width) = image_size
    norm_annotation = {}
    for (key, value) in annotation.items():
        if key == 'boxes':
            boxes = value
            boxes = corners_to_center_format(boxes)
            boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
            norm_annotation[key] = boxes
        else:
            norm_annotation[key] = value
    return norm_annotation

def max_across_indices(values: Iterable[Any]) -> List[Any]:
    return [max(values_i) for values_i in zip(*values)]

def get_max_height_width(images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> List[int]:
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(images[0])
    if input_data_format == ChannelDimension.FIRST:
        (_, max_height, max_width) = max_across_indices([img.shape for img in images])
    elif input_data_format == ChannelDimension.LAST:
        (max_height, max_width, _) = max_across_indices([img.shape for img in images])
    else:
        raise ValueError(f'Invalid channel dimension format: {input_data_format}')
    return (max_height, max_width)

def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
    (input_height, input_width) = get_image_size(image, channel_dim=input_data_format)
    mask = np.zeros(output_size, dtype=np.int64)
    mask[:input_height, :input_width] = 1
    return mask
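
# Hedged example (editor's addition): the pixel mask marks the valid (unpadded) region of
# an image once it has been padded up to `output_size`. Sizes below are illustrative.
def _example_make_pixel_mask():
    image = np.zeros((3, 2, 3))  # channels-first image of height 2 and width 3
    mask = make_pixel_mask(image, output_size=(4, 5), input_data_format=ChannelDimension.FIRST)
    assert mask.shape == (4, 5)
    assert mask.sum() == 6  # only the original 2x3 region is marked valid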

def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
    try:
        from pycocotools import mask as coco_mask
    except ImportError:
        raise ImportError('Pycocotools is not installed in your environment.')
    masks = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        mask = coco_mask.decode(rles)
        if len(mask.shape) < 3:
            mask = mask[..., None]
        mask = np.asarray(mask, dtype=np.uint8)
        mask = np.any(mask, axis=2)
        masks.append(mask)
    if masks:
        masks = np.stack(masks, axis=0)
    else:
        masks = np.zeros((0, height, width), dtype=np.uint8)
    return masks

def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool=False, input_data_format: Optional[Union[ChannelDimension, str]]=None):
    (image_height, image_width) = get_image_size(image, channel_dim=input_data_format)
    image_id = target['image_id']
    image_id = np.asarray([image_id], dtype=np.int64)
    annotations = target['annotations']
    annotations = [obj for obj in annotations if 'iscrowd' not in obj or obj['iscrowd'] == 0]
    classes = [obj['category_id'] for obj in annotations]
    classes = np.asarray(classes, dtype=np.int64)
    area = np.asarray([obj['area'] for obj in annotations], dtype=np.float32)
    iscrowd = np.asarray([obj['iscrowd'] if 'iscrowd' in obj else 0 for obj in annotations], dtype=np.int64)
    boxes = [obj['bbox'] for obj in annotations]
    boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
    boxes[:, 2:] += boxes[:, :2]
    boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
    boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
    keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
    new_target = {}
    new_target['image_id'] = image_id
    new_target['class_labels'] = classes[keep]
    new_target['boxes'] = boxes[keep]
    new_target['area'] = area[keep]
    new_target['iscrowd'] = iscrowd[keep]
    new_target['orig_size'] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
    if annotations and 'keypoints' in annotations[0]:
        keypoints = [obj['keypoints'] for obj in annotations]
        keypoints = np.asarray(keypoints, dtype=np.float32)
        keypoints = keypoints[keep]
        num_keypoints = keypoints.shape[0]
        keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
        new_target['keypoints'] = keypoints
    if return_segmentation_masks:
        segmentation_masks = [obj['segmentation'] for obj in annotations]
        masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
        new_target['masks'] = masks[keep]
    return new_target

def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
    if masks.size == 0:
        return np.zeros((0, 4))
    (h, w) = masks.shape[-2:]
    y = np.arange(0, h, dtype=np.float32)
    x = np.arange(0, w, dtype=np.float32)
    (y, x) = np.meshgrid(y, x, indexing='ij')
    x_mask = masks * np.expand_dims(x, axis=0)
    x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
    x = np.ma.array(x_mask, mask=~np.array(masks, dtype=bool))
    x_min = x.filled(fill_value=100000000.0)
    x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
    y_mask = masks * np.expand_dims(y, axis=0)
    y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
    y = np.ma.array(y_mask, mask=~np.array(masks, dtype=bool))
    y_min = y.filled(fill_value=100000000.0)
    y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
    return np.stack([x_min, y_min, x_max, y_max], 1)
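
# Hedged example (editor's addition): a single 4x4 mask whose lower-right 2x2 block is set
# yields the box [x_min, y_min, x_max, y_max] = [2, 2, 3, 3] in pixel coordinates.
def _example_masks_to_boxes():
    masks = np.zeros((1, 4, 4), dtype=np.uint8)
    masks[0, 2:4, 2:4] = 1
    boxes = masks_to_boxes(masks)
    assert boxes.shape == (1, 4)
    assert boxes[0].tolist() == [2.0, 2.0, 3.0, 3.0]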

def prepare_coco_panoptic_annotation(image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool=True, input_data_format: Union[ChannelDimension, str]=None) -> Dict:
    (image_height, image_width) = get_image_size(image, channel_dim=input_data_format)
    annotation_path = pathlib.Path(masks_path) / target['file_name']
    new_target = {}
    new_target['image_id'] = np.asarray([target['image_id'] if 'image_id' in target else target['id']], dtype=np.int64)
    new_target['size'] = np.asarray([image_height, image_width], dtype=np.int64)
    new_target['orig_size'] = np.asarray([image_height, image_width], dtype=np.int64)
    if 'segments_info' in target:
        masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
        masks = rgb_to_id(masks)
        ids = np.array([segment_info['id'] for segment_info in target['segments_info']])
        masks = masks == ids[:, None, None]
        masks = masks.astype(np.uint8)
        if return_masks:
            new_target['masks'] = masks
        new_target['boxes'] = masks_to_boxes(masks)
        new_target['class_labels'] = np.array([segment_info['category_id'] for segment_info in target['segments_info']], dtype=np.int64)
        new_target['iscrowd'] = np.asarray([segment_info['iscrowd'] for segment_info in target['segments_info']], dtype=np.int64)
        new_target['area'] = np.asarray([segment_info['area'] for segment_info in target['segments_info']], dtype=np.float32)
    return new_target

def get_segmentation_image(masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False):
    (h, w) = input_size
    (final_h, final_w) = target_size
    m_id = scipy.special.softmax(masks.transpose(0, 1), -1)
    if m_id.shape[-1] == 0:
        m_id = np.zeros((h, w), dtype=np.int64)
    else:
        m_id = m_id.argmax(-1).reshape(h, w)
    if deduplicate:
        for equiv in stuff_equiv_classes.values():
            for eq_id in equiv:
                m_id[m_id == eq_id] = equiv[0]
    seg_img = id_to_rgb(m_id)
    seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
    return seg_img

def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray:
    (final_h, final_w) = target_size
    np_seg_img = seg_img.astype(np.uint8)
    np_seg_img = np_seg_img.reshape(final_h, final_w, 3)
    m_id = rgb_to_id(np_seg_img)
    area = [(m_id == i).sum() for i in range(n_classes)]
    return area

def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    probs = scipy.special.softmax(logits, axis=-1)
    labels = probs.argmax(-1, keepdims=True)
    scores = np.take_along_axis(probs, labels, axis=-1)
    (scores, labels) = (scores.squeeze(-1), labels.squeeze(-1))
    return (scores, labels)

def post_process_panoptic_sample(out_logits: np.ndarray, masks: np.ndarray, boxes: np.ndarray, processed_size: Tuple[int, int], target_size: Tuple[int, int], is_thing_map: Dict, threshold=0.85) -> Dict:
    (scores, labels) = score_labels_from_class_probabilities(out_logits)
    keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)
    cur_scores = scores[keep]
    cur_classes = labels[keep]
    cur_boxes = center_to_corners_format(boxes[keep])
    if len(cur_boxes) != len(cur_classes):
        raise ValueError('Not as many boxes as there are classes')
    cur_masks = masks[keep]
    cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
    cur_masks = safe_squeeze(cur_masks, 1)
    (b, h, w) = cur_masks.shape
    cur_masks = cur_masks.reshape(b, -1)
    stuff_equiv_classes = defaultdict(list)
    for (k, label) in enumerate(cur_classes):
        if not is_thing_map[label]:
            stuff_equiv_classes[label].append(k)
    seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
    area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))
    if cur_classes.size > 0:
        filtered_small = np.array([a <= 4 for a in area], dtype=bool)
        while filtered_small.any():
            cur_masks = cur_masks[~filtered_small]
            cur_scores = cur_scores[~filtered_small]
            cur_classes = cur_classes[~filtered_small]
            seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
            area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
            filtered_small = np.array([a <= 4 for a in area], dtype=bool)
    else:
        cur_classes = np.ones((1, 1), dtype=np.int64)
    segments_info = [{'id': i, 'isthing': is_thing_map[cat], 'category_id': int(cat), 'area': a} for (i, (cat, a)) in enumerate(zip(cur_classes, area))]
    del cur_classes
    with io.BytesIO() as out:
        PIL.Image.fromarray(seg_img).save(out, format='PNG')
        predictions = {'png_string': out.getvalue(), 'segments_info': segments_info}
    return predictions

def resize_annotation(annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, resample: PILImageResampling=PILImageResampling.NEAREST):
    ratios = tuple((float(s) / float(s_orig) for (s, s_orig) in zip(target_size, orig_size)))
    (ratio_height, ratio_width) = ratios
    new_annotation = {}
    new_annotation['size'] = target_size
    for (key, value) in annotation.items():
        if key == 'boxes':
            boxes = value
            scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
            new_annotation['boxes'] = scaled_boxes
        elif key == 'area':
            area = value
            scaled_area = area * (ratio_width * ratio_height)
            new_annotation['area'] = scaled_area
        elif key == 'masks':
            masks = value[:, None]
            masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
            masks = masks.astype(np.float32)
            masks = masks[:, 0] > threshold
            new_annotation['masks'] = masks
        elif key == 'size':
            new_annotation['size'] = target_size
        else:
            new_annotation[key] = value
    return new_annotation

def binary_mask_to_rle(mask):
    if is_torch_tensor(mask):
        mask = mask.numpy()
    pixels = mask.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return list(runs)
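
# Hedged example (editor's addition): this helper returns (start, length) pairs over the
# flattened mask, with 1-based start positions.
def _example_binary_mask_to_rle():
    mask = np.array([[0, 1, 1, 0], [0, 1, 1, 0]])
    # Flattened: 0 1 1 0 0 1 1 0 -> runs of ones start at positions 2 and 6, each of length 2.
    assert list(binary_mask_to_rle(mask)) == [2, 2, 6, 2]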

def convert_segmentation_to_rle(segmentation):
    segment_ids = torch.unique(segmentation)
    run_length_encodings = []
    for idx in segment_ids:
        mask = torch.where(segmentation == idx, 1, 0)
        rle = binary_mask_to_rle(mask)
        run_length_encodings.append(rle)
    return run_length_encodings

def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
    if not masks.shape[0] == scores.shape[0] == labels.shape[0]:
        raise ValueError('masks, scores and labels must have the same first dimension!')
    to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)
    return (masks[to_keep], scores[to_keep], labels[to_keep])

def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
    mask_k = mask_labels == k
    mask_k_area = mask_k.sum()
    original_area = (mask_probs[k] >= mask_threshold).sum()
    mask_exists = mask_k_area > 0 and original_area > 0
    if mask_exists:
        area_ratio = mask_k_area / original_area
        if not area_ratio.item() > overlap_mask_area_threshold:
            mask_exists = False
    return (mask_exists, mask_k)

def compute_segments(mask_probs, pred_scores, pred_labels, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[Set[int]]=None, target_size: Tuple[int, int]=None):
    height = mask_probs.shape[1] if target_size is None else target_size[0]
    width = mask_probs.shape[2] if target_size is None else target_size[1]
    segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
    segments: List[Dict] = []
    if target_size is not None:
        mask_probs = nn.functional.interpolate(mask_probs.unsqueeze(0), size=target_size, mode='bilinear', align_corners=False)[0]
    current_segment_id = 0
    mask_probs *= pred_scores.view(-1, 1, 1)
    mask_labels = mask_probs.argmax(0)
    stuff_memory_list: Dict[str, int] = {}
    for k in range(pred_labels.shape[0]):
        pred_class = pred_labels[k].item()
        should_fuse = pred_class in label_ids_to_fuse
        (mask_exists, mask_k) = check_segment_validity(mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold)
        if mask_exists:
            if pred_class in stuff_memory_list:
                current_segment_id = stuff_memory_list[pred_class]
            else:
                current_segment_id += 1
            segmentation[mask_k] = current_segment_id
            segment_score = round(pred_scores[k].item(), 6)
            segments.append({'id': current_segment_id, 'label_id': pred_class, 'was_fused': should_fuse, 'score': segment_score})
            if should_fuse:
                stuff_memory_list[pred_class] = current_segment_id
    return (segmentation, segments)

class DeformableDetrImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values', 'pixel_mask']

    def __init__(self, format: Union[str, AnnotationFormat]=AnnotationFormat.COCO_DETECTION, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Union[float, List[float]]=None, image_std: Union[float, List[float]]=None, do_convert_annotations: Optional[bool]=None, do_pad: bool=True, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> None:
        if 'pad_and_return_pixel_mask' in kwargs:
            do_pad = kwargs.pop('pad_and_return_pixel_mask')
        if 'max_size' in kwargs:
            logger.warning_once("The `max_size` parameter is deprecated and will be removed in v4.26. Please specify in `size['longest_edge'] instead`.")
            max_size = kwargs.pop('max_size')
        else:
            max_size = None if size is None else 1333
        size = size if size is not None else {'shortest_edge': 800, 'longest_edge': 1333}
        size = get_size_dict(size, max_size=max_size, default_to_square=False)
        if do_convert_annotations is None:
            do_convert_annotations = do_normalize
        super().__init__(**kwargs)
        self.format = format
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_convert_annotations = do_convert_annotations
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        self.do_pad = do_pad
        self.pad_size = pad_size
        self._valid_processor_keys = ['images', 'annotations', 'return_segmentation_masks', 'masks_path', 'do_resize', 'size', 'resample', 'do_rescale', 'rescale_factor', 'do_normalize', 'do_convert_annotations', 'image_mean', 'image_std', 'do_pad', 'pad_size', 'format', 'return_tensors', 'data_format', 'input_data_format']

    @classmethod
    def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
        image_processor_dict = image_processor_dict.copy()
        if 'max_size' in kwargs:
            image_processor_dict['max_size'] = kwargs.pop('max_size')
        if 'pad_and_return_pixel_mask' in kwargs:
            image_processor_dict['pad_and_return_pixel_mask'] = kwargs.pop('pad_and_return_pixel_mask')
        return super().from_dict(image_processor_dict, **kwargs)

    def prepare_annotation(self, image: np.ndarray, target: Dict, format: Optional[AnnotationFormat]=None, return_segmentation_masks: bool=None, masks_path: Optional[Union[str, pathlib.Path]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Dict:
        format = format if format is not None else self.format
        if format == AnnotationFormat.COCO_DETECTION:
            return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
            target = prepare_coco_detection_annotation(image, target, return_segmentation_masks, input_data_format=input_data_format)
        elif format == AnnotationFormat.COCO_PANOPTIC:
            return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
            target = prepare_coco_panoptic_annotation(image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format)
        else:
            raise ValueError(f'Format {format} is not supported.')
        return target

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        if 'max_size' in kwargs:
            logger.warning_once("The `max_size` parameter is deprecated and will be removed in v4.26. Please specify in `size['longest_edge'] instead`.")
            max_size = kwargs.pop('max_size')
        else:
            max_size = None
        size = get_size_dict(size, max_size=max_size, default_to_square=False)
        if 'shortest_edge' in size and 'longest_edge' in size:
            new_size = get_resize_output_image_size(image, size['shortest_edge'], size['longest_edge'], input_data_format=input_data_format)
        elif 'max_height' in size and 'max_width' in size:
            new_size = get_image_size_for_max_height_width(image, size['max_height'], size['max_width'], input_data_format=input_data_format)
        elif 'height' in size and 'width' in size:
            new_size = (size['height'], size['width'])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.")
        image = resize(image, size=new_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
        return image

    def resize_annotation(self, annotation, orig_size, size, resample: PILImageResampling=PILImageResampling.NEAREST) -> Dict:
        return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)

    def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
        return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)

    def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
        return normalize_annotation(annotation, image_size=image_size)

    def _update_annotation_for_padded_image(self, annotation: Dict, input_image_size: Tuple[int, int], output_image_size: Tuple[int, int], padding, update_bboxes) -> Dict:
        new_annotation = {}
        new_annotation['size'] = output_image_size
        for (key, value) in annotation.items():
            if key == 'masks':
                masks = value
                masks = pad(masks, padding, mode=PaddingMode.CONSTANT, constant_values=0, input_data_format=ChannelDimension.FIRST)
                masks = safe_squeeze(masks, 1)
                new_annotation['masks'] = masks
            elif key == 'boxes' and update_bboxes:
                boxes = value
                boxes *= np.asarray([input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0], input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0]])
                new_annotation['boxes'] = boxes
            elif key == 'size':
                new_annotation['size'] = output_image_size
            else:
                new_annotation[key] = value
        return new_annotation

    def _pad_image(self, image: np.ndarray, output_size: Tuple[int, int], annotation: Optional[Dict[str, Any]]=None, constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True) -> np.ndarray:
        (input_height, input_width) = get_image_size(image, channel_dim=input_data_format)
        (output_height, output_width) = output_size
        pad_bottom = output_height - input_height
        pad_right = output_width - input_width
        padding = ((0, pad_bottom), (0, pad_right))
        padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
        if annotation is not None:
            annotation = self._update_annotation_for_padded_image(annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes)
        return (padded_image, annotation)

    def pad(self, images: List[np.ndarray], annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True, pad_size: Optional[Dict[str, int]]=None) -> BatchFeature:
        pad_size = pad_size if pad_size is not None else self.pad_size
        if pad_size is not None:
            padded_size = (pad_size['height'], pad_size['width'])
        else:
            padded_size = get_max_height_width(images, input_data_format=input_data_format)
        annotation_list = annotations if annotations is not None else [None] * len(images)
        padded_images = []
        padded_annotations = []
        for (image, annotation) in zip(images, annotation_list):
            (padded_image, padded_annotation) = self._pad_image(image, padded_size, annotation, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, update_bboxes=update_bboxes)
            padded_images.append(padded_image)
            padded_annotations.append(padded_annotation)
        data = {'pixel_values': padded_images}
        if return_pixel_mask:
            masks = [make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images]
            data['pixel_mask'] = masks
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        if annotations is not None:
            encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations]
        return encoded_inputs

    def preprocess(self, images: ImageInput, annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, return_segmentation_masks: bool=None, masks_path: Optional[Union[str, pathlib.Path]]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, do_convert_annotations: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_pad: Optional[bool]=None, format: Optional[Union[str, AnnotationFormat]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> BatchFeature:
        if 'pad_and_return_pixel_mask' in kwargs:
            logger.warning_once('The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, use `do_pad` instead.')
            do_pad = kwargs.pop('pad_and_return_pixel_mask')
        max_size = None
        if 'max_size' in kwargs:
            logger.warning_once("The `max_size` argument is deprecated and will be removed in a future version, use `size['longest_edge']` instead.")
            max_size = kwargs.pop('max_size')
        do_resize = self.do_resize if do_resize is None else do_resize
        size = self.size if size is None else size
        size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
        resample = self.resample if resample is None else resample
        do_rescale = self.do_rescale if do_rescale is None else do_rescale
        rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
        do_normalize = self.do_normalize if do_normalize is None else do_normalize
        image_mean = self.image_mean if image_mean is None else image_mean
        image_std = self.image_std if image_std is None else image_std
        do_convert_annotations = self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
        do_pad = self.do_pad if do_pad is None else do_pad
        pad_size = self.pad_size if pad_size is None else pad_size
        format = self.format if format is None else format
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
        validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
        if annotations is not None and isinstance(annotations, dict):
            annotations = [annotations]
        if annotations is not None and len(images) != len(annotations):
            raise ValueError(f'The number of images ({len(images)}) and annotations ({len(annotations)}) do not match.')
        format = AnnotationFormat(format)
        if annotations is not None:
            validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
        if masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and (not isinstance(masks_path, (pathlib.Path, str))):
            raise ValueError(f'The path to the directory containing the mask PNG files should be provided as a `pathlib.Path` or string object, but is {type(masks_path)} instead.')
        images = [to_numpy_array(image) for image in images]
        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(images[0])
        if annotations is not None:
            prepared_images = []
            prepared_annotations = []
            for (image, target) in zip(images, annotations):
                target = self.prepare_annotation(image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=input_data_format)
                prepared_images.append(image)
                prepared_annotations.append(target)
            images = prepared_images
            annotations = prepared_annotations
            del prepared_images, prepared_annotations
        if do_resize:
            if annotations is not None:
                (resized_images, resized_annotations) = ([], [])
                for (image, target) in zip(images, annotations):
                    orig_size = get_image_size(image, input_data_format)
                    resized_image = self.resize(image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format)
                    resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image, input_data_format))
                    resized_images.append(resized_image)
                    resized_annotations.append(resized_annotation)
                images = resized_images
                annotations = resized_annotations
                del resized_images, resized_annotations
            else:
                images = [self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images]
        if do_convert_annotations and annotations is not None:
            annotations = [self.normalize_annotation(annotation, get_image_size(image, input_data_format)) for (annotation, image) in zip(annotations, images)]
        if do_pad:
            encoded_inputs = self.pad(images, annotations=annotations, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format, update_bboxes=do_convert_annotations, return_tensors=return_tensors, pad_size=pad_size)
        else:
            images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
            encoded_inputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
            if annotations is not None:
                encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations]
        return encoded_inputs

    def post_process(self, outputs, target_sizes):
        logger.warning_once('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.')
        (out_logits, out_bbox) = (outputs.logits, outputs.pred_boxes)
        if len(out_logits) != len(target_sizes):
            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
        if target_sizes.shape[1] != 2:
            raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')
        prob = out_logits.sigmoid()
        (topk_values, topk_indexes) = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
        scores = topk_values
        topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')
        labels = topk_indexes % out_logits.shape[2]
        boxes = center_to_corners_format(out_bbox)
        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
        (img_h, img_w) = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]
        results = [{'scores': s, 'labels': l, 'boxes': b} for (s, l, b) in zip(scores, labels, boxes)]
        return results

    def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, top_k: int=100):
        (out_logits, out_bbox) = (outputs.logits, outputs.pred_boxes)
        if target_sizes is not None:
            if len(out_logits) != len(target_sizes):
                raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
        prob = out_logits.sigmoid()
        prob = prob.view(out_logits.shape[0], -1)
        k_value = min(top_k, prob.size(1))
        (topk_values, topk_indexes) = torch.topk(prob, k_value, dim=1)
        scores = topk_values
        topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor')
        labels = topk_indexes % out_logits.shape[2]
        boxes = center_to_corners_format(out_bbox)
        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
        if target_sizes is not None:
            if isinstance(target_sizes, List):
                img_h = torch.Tensor([i[0] for i in target_sizes])
                img_w = torch.Tensor([i[1] for i in target_sizes])
            else:
                (img_h, img_w) = target_sizes.unbind(1)
            scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
            boxes = boxes * scale_fct[:, None, :]
        results = []
        for (s, l, b) in zip(scores, labels, boxes):
            score = s[s > threshold]
            label = l[s > threshold]
            box = b[s > threshold]
            results.append({'scores': score, 'labels': label, 'boxes': box})
        return results
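
# Hedged usage sketch (editor's addition, not part of the library file): end-to-end object
# detection with the processor defined above. The checkpoint name mirrors the
# `_CHECKPOINT_FOR_DOC` constant used in the modeling file and is assumed to be available
# on the Hub; the blank image, threshold and target size are purely illustrative.
def _example_post_process_object_detection():
    from PIL import Image

    from transformers import DeformableDetrForObjectDetection

    processor = DeformableDetrImageProcessor.from_pretrained('SenseTime/deformable-detr')
    model = DeformableDetrForObjectDetection.from_pretrained('SenseTime/deformable-detr')
    image = Image.new('RGB', (640, 480))
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    # target_sizes holds the (height, width) of each original image so the predicted boxes
    # are rescaled back to absolute pixel coordinates.
    results = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=[(480, 640)])
    return results[0]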

# File: transformers-main/src/transformers/models/deformable_detr/load_custom.py
""""""
import os
from pathlib import Path

def load_cuda_kernels():
    from torch.utils.cpp_extension import load
    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [root / filename for filename in ['vision.cpp', os.path.join('cpu', 'ms_deform_attn_cpu.cpp'), os.path.join('cuda', 'ms_deform_attn_cuda.cu')]]
    load('MultiScaleDeformableAttention', src_files, with_cuda=True, extra_include_paths=[str(root)], extra_cflags=['-DWITH_CUDA=1'], extra_cuda_cflags=['-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__'])
    import MultiScaleDeformableAttention as MSDA
    return MSDA

# File: transformers-main/src/transformers/models/deformable_detr/modeling_deformable_detr.py
""""""
import copy
import math
import os
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from ...activations import ACT2FN
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import meshgrid
from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_accelerate_available, is_ninja_available, is_scipy_available, is_timm_available, is_torch_cuda_available, is_vision_available, logging, replace_return_docstrings, requires_backends
from ...utils.backbone_utils import load_backbone
from .configuration_deformable_detr import DeformableDetrConfig
logger = logging.get_logger(__name__)
MultiScaleDeformableAttention = None

def load_cuda_kernels():
    from torch.utils.cpp_extension import load
    global MultiScaleDeformableAttention
    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [root / filename for filename in ['vision.cpp', os.path.join('cpu', 'ms_deform_attn_cpu.cpp'), os.path.join('cuda', 'ms_deform_attn_cuda.cu')]]
    MultiScaleDeformableAttention = load('MultiScaleDeformableAttention', src_files, with_cuda=True, extra_include_paths=[str(root)], extra_cflags=['-DWITH_CUDA=1'], extra_cuda_cflags=['-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__'])
if is_vision_available():
    from transformers.image_transforms import center_to_corners_format
if is_accelerate_available():
    from accelerate import PartialState
    from accelerate.utils import reduce
if is_timm_available():
    from timm import create_model
if is_scipy_available():
    from scipy.optimize import linear_sum_assignment
_CONFIG_FOR_DOC = 'DeformableDetrConfig'
_CHECKPOINT_FOR_DOC = 'sensetime/deformable-detr'

class MultiScaleDeformableAttentionFunction(Function):

    @staticmethod
    def forward(context, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
        context.im2col_step = im2col_step
        output = MultiScaleDeformableAttention.ms_deform_attn_forward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, context.im2col_step)
        context.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
        return output

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        (value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights) = context.saved_tensors
        (grad_value, grad_sampling_loc, grad_attn_weight) = MultiScaleDeformableAttention.ms_deform_attn_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, context.im2col_step)
        return (grad_value, None, None, grad_sampling_loc, grad_attn_weight, None)

@dataclass
class DeformableDetrDecoderOutput(ModelOutput):
    last_hidden_state: torch.FloatTensor = None
    intermediate_hidden_states: torch.FloatTensor = None
    intermediate_reference_points: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None

@dataclass
class DeformableDetrModelOutput(ModelOutput):
    init_reference_points: torch.FloatTensor = None
    last_hidden_state: torch.FloatTensor = None
    intermediate_hidden_states: torch.FloatTensor = None
    intermediate_reference_points: torch.FloatTensor = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    enc_outputs_class: Optional[torch.FloatTensor] = None
    enc_outputs_coord_logits: Optional[torch.FloatTensor] = None

@dataclass
class DeformableDetrObjectDetectionOutput(ModelOutput):
    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[Dict] = None
    logits: torch.FloatTensor = None
    pred_boxes: torch.FloatTensor = None
    auxiliary_outputs: Optional[List[Dict]] = None
    init_reference_points: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    intermediate_hidden_states: Optional[torch.FloatTensor] = None
    intermediate_reference_points: Optional[torch.FloatTensor] = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    enc_outputs_class: Optional[torch.FloatTensor] = None
    enc_outputs_coord_logits: Optional[torch.FloatTensor] = None

def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])

def inverse_sigmoid(x, eps=1e-05):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)
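
# Hedged example (editor's addition): inverse_sigmoid is a clamped logit, so applying
# torch.sigmoid on top recovers the input up to the eps clamping.
def _example_inverse_sigmoid():
    x = torch.tensor([0.1, 0.5, 0.9])
    assert torch.allclose(torch.sigmoid(inverse_sigmoid(x)), x, atol=1e-05)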

class DeformableDetrFrozenBatchNorm2d(nn.Module):

    def __init__(self, n):
        super().__init__()
        self.register_buffer('weight', torch.ones(n))
        self.register_buffer('bias', torch.zeros(n))
        self.register_buffer('running_mean', torch.zeros(n))
        self.register_buffer('running_var', torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        weight = self.weight.reshape(1, -1, 1, 1)
        bias = self.bias.reshape(1, -1, 1, 1)
        running_var = self.running_var.reshape(1, -1, 1, 1)
        running_mean = self.running_mean.reshape(1, -1, 1, 1)
        epsilon = 1e-05
        scale = weight * (running_var + epsilon).rsqrt()
        bias = bias - running_mean * scale
        return x * scale + bias

def replace_batch_norm(model):
    for (name, module) in model.named_children():
        if isinstance(module, nn.BatchNorm2d):
            new_module = DeformableDetrFrozenBatchNorm2d(module.num_features)
            if not module.weight.device == torch.device('meta'):
                new_module.weight.data.copy_(module.weight)
                new_module.bias.data.copy_(module.bias)
                new_module.running_mean.data.copy_(module.running_mean)
                new_module.running_var.data.copy_(module.running_var)
            model._modules[name] = new_module
        if len(list(module.children())) > 0:
            replace_batch_norm(module)
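
# Hedged example (editor's addition): replace_batch_norm swaps every nn.BatchNorm2d in a
# module tree for the frozen variant above, copying its statistics in place.
def _example_replace_batch_norm():
    net = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.BatchNorm2d(8), nn.ReLU())
    replace_batch_norm(net)
    assert isinstance(net[1], DeformableDetrFrozenBatchNorm2d)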

class DeformableDetrConvEncoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        if config.use_timm_backbone:
            requires_backends(self, ['timm'])
            kwargs = getattr(config, 'backbone_kwargs', {})
            kwargs = {} if kwargs is None else kwargs.copy()
            out_indices = kwargs.pop('out_indices', (2, 3, 4) if config.num_feature_levels > 1 else (4,))
            num_channels = kwargs.pop('in_chans', config.num_channels)
            if config.dilation:
                kwargs['output_stride'] = kwargs.get('output_stride', 16)
            backbone = create_model(config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=out_indices, in_chans=num_channels, **kwargs)
        else:
            backbone = load_backbone(config)
        with torch.no_grad():
            replace_batch_norm(backbone)
        self.model = backbone
        self.intermediate_channel_sizes = self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels
        backbone_model_type = None
        if config.backbone is not None:
            backbone_model_type = config.backbone
        elif config.backbone_config is not None:
            backbone_model_type = config.backbone_config.model_type
        else:
            raise ValueError('Either `backbone` or `backbone_config` should be provided in the config')
        if 'resnet' in backbone_model_type:
            for (name, parameter) in self.model.named_parameters():
                if config.use_timm_backbone:
                    if 'layer2' not in name and 'layer3' not in name and ('layer4' not in name):
                        parameter.requires_grad_(False)
                elif 'stage.1' not in name and 'stage.2' not in name and ('stage.3' not in name):
                    parameter.requires_grad_(False)

    def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
        features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps
        out = []
        for feature_map in features:
            mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
            out.append((feature_map, mask))
        return out

class DeformableDetrConvModel(nn.Module):

    def __init__(self, conv_encoder, position_embedding):
        super().__init__()
        self.conv_encoder = conv_encoder
        self.position_embedding = position_embedding

    def forward(self, pixel_values, pixel_mask):
        out = self.conv_encoder(pixel_values, pixel_mask)
        pos = []
        for (feature_map, mask) in out:
            pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype))
        return (out, pos)

class DeformableDetrSinePositionEmbedding(nn.Module):

    def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError('normalize should be True if scale is passed')
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, pixel_values, pixel_mask):
        if pixel_mask is None:
            raise ValueError('No pixel mask provided')
        y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
        x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-06
            y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
        dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float()
        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.embedding_dim)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos

class DeformableDetrLearnedPositionEmbedding(nn.Module):

    def __init__(self, embedding_dim=256):
        super().__init__()
        self.row_embeddings = nn.Embedding(50, embedding_dim)
        self.column_embeddings = nn.Embedding(50, embedding_dim)

    def forward(self, pixel_values, pixel_mask=None):
        (height, width) = pixel_values.shape[-2:]
        width_values = torch.arange(width, device=pixel_values.device)
        height_values = torch.arange(height, device=pixel_values.device)
        x_emb = self.column_embeddings(width_values)
        y_emb = self.row_embeddings(height_values)
        pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
        pos = pos.permute(2, 0, 1)
        pos = pos.unsqueeze(0)
        pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
        return pos

def build_position_encoding(config):
    n_steps = config.d_model // 2
    if config.position_embedding_type == 'sine':
        position_embedding = DeformableDetrSinePositionEmbedding(n_steps, normalize=True)
    elif config.position_embedding_type == 'learned':
        position_embedding = DeformableDetrLearnedPositionEmbedding(n_steps)
    else:
        raise ValueError(f'Not supported {config.position_embedding_type}')
    return position_embedding
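
# Usage sketch for build_position_encoding, assuming the default DeformableDetrConfig
# (d_model=256, position_embedding_type='sine'):
#
#     config = DeformableDetrConfig()
#     position_embedding = build_position_encoding(config)
#     # -> DeformableDetrSinePositionEmbedding with embedding_dim = d_model // 2 = 128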

def multi_scale_deformable_attention(value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor) -> Tensor:
    (batch_size, _, num_heads, hidden_dim) = value.shape
    (_, num_queries, num_heads, num_levels, num_points, _) = sampling_locations.shape
    value_list = value.split([height.item() * width.item() for (height, width) in value_spatial_shapes], dim=1)
    sampling_grids = 2 * sampling_locations - 1
    sampling_value_list = []
    for (level_id, (height, width)) in enumerate(value_spatial_shapes):
        value_l_ = value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, width)
        sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1)
        sampling_value_l_ = nn.functional.grid_sample(value_l_, sampling_grid_l_, mode='bilinear', padding_mode='zeros', align_corners=False)
        sampling_value_list.append(sampling_value_l_)
    attention_weights = attention_weights.transpose(1, 2).reshape(batch_size * num_heads, 1, num_queries, num_levels * num_points)
    output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(batch_size, num_heads * hidden_dim, num_queries)
    return output.transpose(1, 2).contiguous()
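
# Shape walk-through for the pure-PyTorch fallback above (a sketch, assuming 8 heads,
# 32 channels per head, 4 feature levels and 4 sampling points per level):
#
#     value              (batch, sum(H_l * W_l), 8, 32)
#     sampling_locations (batch, num_queries, 8, 4, 4, 2)   normalized to [0, 1]
#     attention_weights  (batch, num_queries, 8, 4, 4)
#     returned output    (batch, num_queries, 8 * 32)
#
# grid_sample expects grids in [-1, 1], hence `2 * sampling_locations - 1`.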

class DeformableDetrMultiscaleDeformableAttention(nn.Module):

    def __init__(self, config: DeformableDetrConfig, num_heads: int, n_points: int):
        super().__init__()
        kernel_loaded = MultiScaleDeformableAttention is not None
        if is_torch_cuda_available() and is_ninja_available() and (not kernel_loaded):
            try:
                load_cuda_kernels()
            except Exception as e:
                logger.warning(f'Could not load the custom kernel for multi-scale deformable attention: {e}')
        if config.d_model % num_heads != 0:
            raise ValueError(f'embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}')
        dim_per_head = config.d_model // num_heads
        # check that dim_per_head is a power of 2
        if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
            warnings.warn("You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the dimension of each attention head a power of 2, which is more efficient in the authors' CUDA implementation.")
        self.im2col_step = 64
        self.d_model = config.d_model
        self.n_levels = config.num_feature_levels
        self.n_heads = num_heads
        self.n_points = n_points
        self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2)
        self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points)
        self.value_proj = nn.Linear(config.d_model, config.d_model)
        self.output_proj = nn.Linear(config.d_model, config.d_model)
        self.disable_custom_kernels = config.disable_custom_kernels
        self._reset_parameters()

    def _reset_parameters(self):
        nn.init.constant_(self.sampling_offsets.weight.data, 0.0)
        default_dtype = torch.get_default_dtype()
        thetas = torch.arange(self.n_heads, dtype=torch.int64).to(default_dtype) * (2.0 * math.pi / self.n_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
        for i in range(self.n_points):
            grid_init[:, :, i, :] *= i + 1
        with torch.no_grad():
            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
        nn.init.constant_(self.attention_weights.weight.data, 0.0)
        nn.init.constant_(self.attention_weights.bias.data, 0.0)
        nn.init.xavier_uniform_(self.value_proj.weight.data)
        nn.init.constant_(self.value_proj.bias.data, 0.0)
        nn.init.xavier_uniform_(self.output_proj.weight.data)
        nn.init.constant_(self.output_proj.bias.data, 0.0)

    def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
        return tensor if position_embeddings is None else tensor + position_embeddings

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False):
        if position_embeddings is not None:
            hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
        (batch_size, num_queries, _) = hidden_states.shape
        (batch_size, sequence_length, _) = encoder_hidden_states.shape
        if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
            raise ValueError('Make sure to align the spatial shapes with the sequence length of the encoder hidden states')
        value = self.value_proj(encoder_hidden_states)
        if attention_mask is not None:
            value = value.masked_fill(~attention_mask[..., None], float(0))
        value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
        sampling_offsets = self.sampling_offsets(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2)
        attention_weights = self.attention_weights(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels * self.n_points)
        attention_weights = F.softmax(attention_weights, -1).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points)
        num_coordinates = reference_points.shape[-1]
        if num_coordinates == 2:
            offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
            sampling_locations = reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
        elif num_coordinates == 4:
            sampling_locations = reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
        else:
            raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}')
        if self.disable_custom_kernels:
            output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights)
        else:
            try:
                output = MultiScaleDeformableAttentionFunction.apply(value, spatial_shapes, level_start_index, sampling_locations, attention_weights, self.im2col_step)
            except Exception:
                output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights)
        output = self.output_proj(output)
        return (output, attention_weights)
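
# Notes on the module above: when the last dimension of `reference_points` is 2 the
# points are normalized (x, y) centers, and when it is 4 they are (x, y, w, h) boxes
# (box-refinement / two-stage path) whose size scales the sampling offsets. If the
# custom CUDA kernel is unavailable or fails, the pure-PyTorch
# multi_scale_deformable_attention above is used instead.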

class DeformableDetrMultiheadAttention(nn.Module):

    def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, bias: bool=True):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
        self.scaling = self.head_dim ** (-0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
        return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
        return tensor if position_embeddings is None else tensor + position_embeddings

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        (batch_size, target_len, embed_dim) = hidden_states.size()
        # position embeddings are added to the queries and keys only; values use the raw hidden states
        hidden_states_original = hidden_states
        if position_embeddings is not None:
            hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
        query_states = self.q_proj(hidden_states) * self.scaling
        key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
        value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
        proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        source_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
            raise ValueError(f'Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is {attn_weights.size()}')
        if attention_mask is not None:
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
        if attention_mask is not None:
            if attention_mask.size() != (batch_size, 1, target_len, source_len):
                raise ValueError(f'Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is {attention_mask.size()}')
            attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
            attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if output_attentions:
            attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
            attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
            raise ValueError(f'`attn_output` should be of size {(batch_size * self.num_heads, target_len, self.head_dim)}, but is {attn_output.size()}')
        attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
        attn_output = self.out_proj(attn_output)
        return (attn_output, attn_weights_reshaped)
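
# In the self-attention block above, the optional position embeddings are added to the
# inputs of the query and key projections only; the value projection always sees the
# raw hidden states, which is why `hidden_states_original` is kept around.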

class DeformableDetrEncoderLayer(nn.Module):

    def __init__(self, config: DeformableDetrConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = DeformableDetrMultiscaleDeformableAttention(config, num_heads=config.encoder_attention_heads, n_points=config.encoder_n_points)
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False):
        residual = hidden_states
        (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        if self.training:
            if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
                clamp_value = torch.finfo(hidden_states.dtype).max - 1000
                hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs

class DeformableDetrDecoderLayer(nn.Module):

    def __init__(self, config: DeformableDetrConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = DeformableDetrMultiheadAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = DeformableDetrMultiscaleDeformableAttention(config, num_heads=config.decoder_attention_heads, n_points=config.decoder_n_points)
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
        residual = hidden_states
        (hidden_states, self_attn_weights) = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, output_attentions=output_attentions)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        second_residual = hidden_states
        cross_attn_weights = None
        (hidden_states, cross_attn_weights) = self.encoder_attn(hidden_states=hidden_states, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = second_residual + hidden_states
        hidden_states = self.encoder_attn_layer_norm(hidden_states)
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        return outputs

class DeformableDetrPreTrainedModel(PreTrainedModel):
    config_class = DeformableDetrConfig
    base_model_prefix = 'model'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    _no_split_modules = ['DeformableDetrConvEncoder', 'DeformableDetrEncoderLayer', 'DeformableDetrDecoderLayer']

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, DeformableDetrLearnedPositionEmbedding):
            nn.init.uniform_(module.row_embeddings.weight)
            nn.init.uniform_(module.column_embeddings.weight)
        elif isinstance(module, DeformableDetrMultiscaleDeformableAttention):
            module._reset_parameters()
        elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        if hasattr(module, 'reference_points') and (not self.config.two_stage):
            nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0)
            nn.init.constant_(module.reference_points.bias.data, 0.0)
        if hasattr(module, 'level_embed'):
            nn.init.normal_(module.level_embed)
DEFORMABLE_DETR_START_DOCSTRING = '\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`DeformableDetrConfig`]):\n            Model configuration class with all the parameters of the model. Initializing with a config file does not\n            load the weights associated with the model, only the configuration. Check out the\n            [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
DEFORMABLE_DETR_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it.\n\n            Pixel values can be obtained using [`AutoImageProcessor`]. See [`DeformableDetrImageProcessor.__call__`]\n            for details.\n\n        pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):\n            Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n            - 1 for pixels that are real (i.e. **not masked**),\n            - 0 for pixels that are padding (i.e. **masked**).\n\n            [What are attention masks?](../glossary#attention-mask)\n\n        decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):\n            Not used by default. Can be used to mask object queries.\n        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you\n            can choose to directly pass a flattened representation of an image.\n        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):\n            Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an\n            embedded representation.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'

class DeformableDetrEncoder(DeformableDetrPreTrainedModel):

    def __init__(self, config: DeformableDetrConfig):
        super().__init__(config)
        self.gradient_checkpointing = False
        self.dropout = config.dropout
        self.layers = nn.ModuleList([DeformableDetrEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.post_init()

    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        reference_points_list = []
        for (level, (height, width)) in enumerate(spatial_shapes):
            (ref_y, ref_x) = meshgrid(torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device), torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device), indexing='ij')
            ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height)
            ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width)
            ref = torch.stack((ref_x, ref_y), -1)
            reference_points_list.append(ref)
        reference_points = torch.cat(reference_points_list, 1)
        reference_points = reference_points[:, :, None] * valid_ratios[:, None]
        return reference_points
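
    # A minimal shape sketch for get_reference_points, assuming batch=2 and two levels
    # of (32, 32) and (16, 16):
    #
    #     reference_points.shape  # (2, 32*32 + 16*16, num_levels, 2) = (2, 1280, 2, 2)
    #
    # i.e. one normalized (x, y) per flattened source position, repeated per level and
    # rescaled by the per-level valid ratios.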

    def forward(self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        hidden_states = inputs_embeds
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for (i, encoder_layer) in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, position_embeddings, reference_points, spatial_shapes, level_start_index, output_attentions)
            else:
                layer_outputs = encoder_layer(hidden_states, attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)

class DeformableDetrDecoder(DeformableDetrPreTrainedModel):

    def __init__(self, config: DeformableDetrConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layers = nn.ModuleList([DeformableDetrDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.gradient_checkpointing = False
        self.bbox_embed = None
        self.class_embed = None
        self.post_init()

    def forward(self, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings=None, reference_points=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
        intermediate = ()
        intermediate_reference_points = ()
        for (idx, decoder_layer) in enumerate(self.layers):
            num_coordinates = reference_points.shape[-1]
            if num_coordinates == 4:
                reference_points_input = reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None]
            elif num_coordinates == 2:
                reference_points_input = reference_points[:, :, None] * valid_ratios[:, None]
            else:
                raise ValueError("Reference points' last dimension must be of size 2 or 4")
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, position_embeddings, reference_points_input, spatial_shapes, level_start_index, encoder_hidden_states, encoder_attention_mask, output_attentions)
            else:
                layer_outputs = decoder_layer(hidden_states, position_embeddings=position_embeddings, encoder_hidden_states=encoder_hidden_states, reference_points=reference_points_input, spatial_shapes=spatial_shapes, level_start_index=level_start_index, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if self.bbox_embed is not None:
                tmp = self.bbox_embed[idx](hidden_states)
                num_coordinates = reference_points.shape[-1]
                if num_coordinates == 4:
                    new_reference_points = tmp + inverse_sigmoid(reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                elif num_coordinates == 2:
                    new_reference_points = tmp
                    new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                else:
                    raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}')
                reference_points = new_reference_points.detach()
            intermediate += (hidden_states,)
            intermediate_reference_points += (reference_points,)
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        intermediate = torch.stack(intermediate, dim=1)
        intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, intermediate, intermediate_reference_points, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
        return DeformableDetrDecoderOutput(last_hidden_state=hidden_states, intermediate_hidden_states=intermediate, intermediate_reference_points=intermediate_reference_points, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)

@add_start_docstrings('\n    The bare Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw\n    hidden-states without any specific head on top.\n    ', DEFORMABLE_DETR_START_DOCSTRING)
class DeformableDetrModel(DeformableDetrPreTrainedModel):

    def __init__(self, config: DeformableDetrConfig):
        super().__init__(config)
        backbone = DeformableDetrConvEncoder(config)
        position_embeddings = build_position_encoding(config)
        self.backbone = DeformableDetrConvModel(backbone, position_embeddings)
        if config.num_feature_levels > 1:
            num_backbone_outs = len(backbone.intermediate_channel_sizes)
            input_proj_list = []
            for level in range(num_backbone_outs):
                in_channels = backbone.intermediate_channel_sizes[level]
                input_proj_list.append(nn.Sequential(nn.Conv2d(in_channels, config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model)))
            for _ in range(config.num_feature_levels - num_backbone_outs):
                input_proj_list.append(nn.Sequential(nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, config.d_model)))
                in_channels = config.d_model
            self.input_proj = nn.ModuleList(input_proj_list)
        else:
            self.input_proj = nn.ModuleList([nn.Sequential(nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model))])
        if not config.two_stage:
            self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2)
        self.encoder = DeformableDetrEncoder(config)
        self.decoder = DeformableDetrDecoder(config)
        self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model))
        if config.two_stage:
            self.enc_output = nn.Linear(config.d_model, config.d_model)
            self.enc_output_norm = nn.LayerNorm(config.d_model)
            self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2)
            self.pos_trans_norm = nn.LayerNorm(config.d_model * 2)
        else:
            self.reference_points = nn.Linear(config.d_model, 2)
        self.post_init()

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def freeze_backbone(self):
        for (name, param) in self.backbone.conv_encoder.model.named_parameters():
            param.requires_grad_(False)

    def unfreeze_backbone(self):
        for (name, param) in self.backbone.conv_encoder.model.named_parameters():
            param.requires_grad_(True)

    def get_valid_ratio(self, mask, dtype=torch.float32):
        (_, height, width) = mask.shape
        valid_height = torch.sum(mask[:, :, 0], 1)
        valid_width = torch.sum(mask[:, 0, :], 1)
        valid_ratio_height = valid_height.to(dtype) / height
        valid_ratio_width = valid_width.to(dtype) / width
        valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
        return valid_ratio

    def get_proposal_pos_embed(self, proposals):
        num_pos_feats = self.config.d_model // 2
        temperature = 10000
        scale = 2 * math.pi
        dim_t = torch.arange(num_pos_feats, dtype=torch.int64, device=proposals.device).float()
        dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats)
        proposals = proposals.sigmoid() * scale
        pos = proposals[:, :, :, None] / dim_t
        pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
        return pos

    def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
        batch_size = enc_output.shape[0]
        proposals = []
        _cur = 0
        for (level, (height, width)) in enumerate(spatial_shapes):
            mask_flatten_ = padding_mask[:, _cur:_cur + height * width].view(batch_size, height, width, 1)
            valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
            valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
            (grid_y, grid_x) = meshgrid(torch.linspace(0, height - 1, height, dtype=enc_output.dtype, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=enc_output.dtype, device=enc_output.device), indexing='ij')
            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
            scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
            grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
            width_height = torch.ones_like(grid) * 0.05 * 2.0 ** level
            proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)
            proposals.append(proposal)
            _cur += height * width
        output_proposals = torch.cat(proposals, 1)
        output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
        output_proposals = torch.log(output_proposals / (1 - output_proposals))
        output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float('inf'))
        output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
        object_query = enc_output
        object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
        object_query = object_query.masked_fill(~output_proposals_valid, float(0))
        object_query = self.enc_output_norm(self.enc_output(object_query))
        return (object_query, output_proposals)
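
    # In the two-stage path, gen_encoder_output_proposals turns every encoder position
    # into a coarse (cx, cy, w, h) proposal in inverse-sigmoid space (w = h = 0.05 * 2**level),
    # masks padded or out-of-range positions with +inf, and re-projects the encoder output
    # so that the top-k scoring proposals can seed the decoder queries.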

    @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=DeformableDetrModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], DeformableDetrModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        (batch_size, num_channels, height, width) = pixel_values.shape
        device = pixel_values.device
        if pixel_mask is None:
            pixel_mask = torch.ones((batch_size, height, width), dtype=torch.long, device=device)
        (features, position_embeddings_list) = self.backbone(pixel_values, pixel_mask)
        sources = []
        masks = []
        for (level, (source, mask)) in enumerate(features):
            sources.append(self.input_proj[level](source))
            masks.append(mask)
            if mask is None:
                raise ValueError('No attention mask was provided')
        if self.config.num_feature_levels > len(sources):
            _len_sources = len(sources)
            for level in range(_len_sources, self.config.num_feature_levels):
                if level == _len_sources:
                    source = self.input_proj[level](features[-1][0])
                else:
                    source = self.input_proj[level](sources[-1])
                mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0]
                pos_l = self.backbone.position_embedding(source, mask).to(source.dtype)
                sources.append(source)
                masks.append(mask)
                position_embeddings_list.append(pos_l)
        query_embeds = None
        if not self.config.two_stage:
            query_embeds = self.query_position_embeddings.weight
        source_flatten = []
        mask_flatten = []
        lvl_pos_embed_flatten = []
        spatial_shapes = []
        for (level, (source, mask, pos_embed)) in enumerate(zip(sources, masks, position_embeddings_list)):
            (batch_size, num_channels, height, width) = source.shape
            spatial_shape = (height, width)
            spatial_shapes.append(spatial_shape)
            source = source.flatten(2).transpose(1, 2)
            mask = mask.flatten(1)
            pos_embed = pos_embed.flatten(2).transpose(1, 2)
            lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
            lvl_pos_embed_flatten.append(lvl_pos_embed)
            source_flatten.append(source)
            mask_flatten.append(mask)
        source_flatten = torch.cat(source_flatten, 1)
        mask_flatten = torch.cat(mask_flatten, 1)
        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device)
        level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
        valid_ratios = torch.stack([self.get_valid_ratio(m, dtype=source_flatten.dtype) for m in masks], 1)
        if encoder_outputs is None:
            encoder_outputs = self.encoder(inputs_embeds=source_flatten, attention_mask=mask_flatten, position_embeddings=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
            encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
        (batch_size, _, num_channels) = encoder_outputs[0].shape
        enc_outputs_class = None
        enc_outputs_coord_logits = None
        if self.config.two_stage:
            (object_query_embedding, output_proposals) = self.gen_encoder_output_proposals(encoder_outputs[0], ~mask_flatten, spatial_shapes)
            enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding)
            delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding)
            enc_outputs_coord_logits = delta_bbox + output_proposals
            topk = self.config.two_stage_num_proposals
            topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]
            topk_coords_logits = torch.gather(enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4))
            topk_coords_logits = topk_coords_logits.detach()
            reference_points = topk_coords_logits.sigmoid()
            init_reference_points = reference_points
            pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits)))
            (query_embed, target) = torch.split(pos_trans_out, num_channels, dim=2)
        else:
            (query_embed, target) = torch.split(query_embeds, num_channels, dim=1)
            query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1)
            target = target.unsqueeze(0).expand(batch_size, -1, -1)
            reference_points = self.reference_points(query_embed).sigmoid()
            init_reference_points = reference_points
        decoder_outputs = self.decoder(inputs_embeds=target, position_embeddings=query_embed, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=mask_flatten, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        if not return_dict:
            enc_outputs = tuple((value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None))
            tuple_outputs = (init_reference_points,) + decoder_outputs + encoder_outputs + enc_outputs
            return tuple_outputs
        return DeformableDetrModelOutput(init_reference_points=init_reference_points, last_hidden_state=decoder_outputs.last_hidden_state, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, intermediate_reference_points=decoder_outputs.intermediate_reference_points, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, enc_outputs_class=enc_outputs_class, enc_outputs_coord_logits=enc_outputs_coord_logits)
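
# A minimal usage sketch for the bare model above (requires `timm` for the default
# ResNet-50 backbone; shapes assume the default config with num_queries=300 and
# d_model=256):
#
#     from transformers import DeformableDetrConfig, DeformableDetrModel
#     model = DeformableDetrModel(DeformableDetrConfig())
#     outputs = model(pixel_values=torch.randn(1, 3, 800, 800))
#     outputs.last_hidden_state.shape  # torch.Size([1, 300, 256])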

@add_start_docstrings('\n    Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on\n    top, for tasks such as COCO detection.\n    ', DEFORMABLE_DETR_START_DOCSTRING)
class DeformableDetrForObjectDetection(DeformableDetrPreTrainedModel):
    _tied_weights_keys = ['bbox_embed\\.[1-9]\\d*', 'class_embed\\.[1-9]\\d*']
    _no_split_modules = None

    def __init__(self, config: DeformableDetrConfig):
        super().__init__(config)
        self.model = DeformableDetrModel(config)
        self.class_embed = nn.Linear(config.d_model, config.num_labels)
        self.bbox_embed = DeformableDetrMLPPredictionHead(input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3)
        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value
        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
        num_pred = config.decoder_layers + 1 if config.two_stage else config.decoder_layers
        if config.with_box_refine:
            self.class_embed = _get_clones(self.class_embed, num_pred)
            self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
            nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
            self.model.decoder.bbox_embed = self.bbox_embed
        else:
            nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
            self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
            self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
            self.model.decoder.bbox_embed = None
        if config.two_stage:
            self.model.decoder.class_embed = self.class_embed
            for box_embed in self.bbox_embed:
                nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)
        self.post_init()

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        return [{'logits': a, 'pred_boxes': b} for (a, b) in zip(outputs_class[:-1], outputs_coord[:-1])]

    @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=DeformableDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[List[dict]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], DeformableDetrObjectDetectionOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.model(pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2]
        init_reference = outputs.init_reference_points if return_dict else outputs[0]
        inter_references = outputs.intermediate_reference_points if return_dict else outputs[3]
        outputs_classes = []
        outputs_coords = []
        for level in range(hidden_states.shape[1]):
            if level == 0:
                reference = init_reference
            else:
                reference = inter_references[:, level - 1]
            reference = inverse_sigmoid(reference)
            outputs_class = self.class_embed[level](hidden_states[:, level])
            delta_bbox = self.bbox_embed[level](hidden_states[:, level])
            if reference.shape[-1] == 4:
                outputs_coord_logits = delta_bbox + reference
            elif reference.shape[-1] == 2:
                delta_bbox[..., :2] += reference
                outputs_coord_logits = delta_bbox
            else:
                raise ValueError(f'reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}')
            outputs_coord = outputs_coord_logits.sigmoid()
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)
        outputs_class = torch.stack(outputs_classes)
        outputs_coord = torch.stack(outputs_coords)
        logits = outputs_class[-1]
        pred_boxes = outputs_coord[-1]
        (loss, loss_dict, auxiliary_outputs) = (None, None, None)
        if labels is not None:
            matcher = DeformableDetrHungarianMatcher(class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost)
            losses = ['labels', 'boxes', 'cardinality']
            criterion = DeformableDetrLoss(matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses)
            criterion.to(self.device)
            outputs_loss = {}
            outputs_loss['logits'] = logits
            outputs_loss['pred_boxes'] = pred_boxes
            if self.config.auxiliary_loss:
                auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord)
                outputs_loss['auxiliary_outputs'] = auxiliary_outputs
            if self.config.two_stage:
                enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid()
                outputs_loss['enc_outputs'] = {'logits': outputs.enc_outputs_class, 'pred_boxes': enc_outputs_coord}
            loss_dict = criterion(outputs_loss, labels)
            weight_dict = {'loss_ce': 1, 'loss_bbox': self.config.bbox_loss_coefficient}
            weight_dict['loss_giou'] = self.config.giou_loss_coefficient
            if self.config.auxiliary_loss:
                aux_weight_dict = {}
                for i in range(self.config.decoder_layers - 1):
                    aux_weight_dict.update({k + f'_{i}': v for (k, v) in weight_dict.items()})
                weight_dict.update(aux_weight_dict)
            loss = sum((loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict))
        if not return_dict:
            if auxiliary_outputs is not None:
                output = (logits, pred_boxes) + auxiliary_outputs + outputs
            else:
                output = (logits, pred_boxes) + outputs
            tuple_outputs = (loss, loss_dict) + output if loss is not None else output
            return tuple_outputs
        dict_outputs = DeformableDetrObjectDetectionOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, intermediate_hidden_states=outputs.intermediate_hidden_states, intermediate_reference_points=outputs.intermediate_reference_points, init_reference_points=outputs.init_reference_points, enc_outputs_class=outputs.enc_outputs_class, enc_outputs_coord_logits=outputs.enc_outputs_coord_logits)
        return dict_outputs
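
# For the detection head above, `labels` is a list with one dict per image, each holding
# `class_labels` (LongTensor of shape (num_targets,)) and `boxes` (FloatTensor of shape
# (num_targets, 4) in normalized (center_x, center_y, width, height) format); the loss
# classes below consume exactly this structure.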

def dice_loss(inputs, targets, num_boxes):
    inputs = inputs.sigmoid()
    inputs = inputs.flatten(1)
    numerator = 2 * (inputs * targets).sum(1)
    denominator = inputs.sum(-1) + targets.sum(-1)
    loss = 1 - (numerator + 1) / (denominator + 1)
    return loss.sum() / num_boxes

def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float=0.25, gamma: float=2):
    prob = inputs.sigmoid()
    ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
    p_t = prob * targets + (1 - prob) * (1 - targets)
    loss = ce_loss * (1 - p_t) ** gamma
    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss
    return loss.mean(1).sum() / num_boxes
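
# The focal loss above follows Lin et al. (2017): with p_t the model probability of the
# true class, loss = -alpha_t * (1 - p_t)**gamma * log(p_t), averaged over queries and
# normalized by the number of target boxes. A rough numeric sketch (alpha=0.25, gamma=2):
#
#     inputs = torch.tensor([[2.0, -2.0]])
#     targets = torch.tensor([[1.0, 0.0]])
#     sigmoid_focal_loss(inputs, targets, num_boxes=1)  # ~9e-4; confident predictions are heavily down-weighted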

class DeformableDetrLoss(nn.Module):

    def __init__(self, matcher, num_classes, focal_alpha, losses):
        super().__init__()
        self.matcher = matcher
        self.num_classes = num_classes
        self.focal_alpha = focal_alpha
        self.losses = losses

    def loss_labels(self, outputs, targets, indices, num_boxes):
        if 'logits' not in outputs:
            raise KeyError('No logits were found in the outputs')
        source_logits = outputs['logits']
        idx = self._get_source_permutation_idx(indices)
        target_classes_o = torch.cat([t['class_labels'][J] for (t, (_, J)) in zip(targets, indices)])
        target_classes = torch.full(source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device)
        target_classes[idx] = target_classes_o
        target_classes_onehot = torch.zeros([source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device)
        target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
        target_classes_onehot = target_classes_onehot[:, :, :-1]
        loss_ce = sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1]
        losses = {'loss_ce': loss_ce}
        return losses

    @torch.no_grad()
    def loss_cardinality(self, outputs, targets, indices, num_boxes):
        logits = outputs['logits']
        device = logits.device
        target_lengths = torch.as_tensor([len(v['class_labels']) for v in targets], device=device)
        card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
        card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
        losses = {'cardinality_error': card_err}
        return losses

    def loss_boxes(self, outputs, targets, indices, num_boxes):
        if 'pred_boxes' not in outputs:
            raise KeyError('No predicted boxes found in outputs')
        idx = self._get_source_permutation_idx(indices)
        source_boxes = outputs['pred_boxes'][idx]
        target_boxes = torch.cat([t['boxes'][i] for (t, (_, i)) in zip(targets, indices)], dim=0)
        loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction='none')
        losses = {}
        losses['loss_bbox'] = loss_bbox.sum() / num_boxes
        loss_giou = 1 - torch.diag(generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)))
        losses['loss_giou'] = loss_giou.sum() / num_boxes
        return losses

    def _get_source_permutation_idx(self, indices):
        batch_idx = torch.cat([torch.full_like(source, i) for (i, (source, _)) in enumerate(indices)])
        source_idx = torch.cat([source for (source, _) in indices])
        return (batch_idx, source_idx)

    def _get_target_permutation_idx(self, indices):
        batch_idx = torch.cat([torch.full_like(target, i) for (i, (_, target)) in enumerate(indices)])
        target_idx = torch.cat([target for (_, target) in indices])
        return (batch_idx, target_idx)

    def get_loss(self, loss, outputs, targets, indices, num_boxes):
        loss_map = {'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes}
        if loss not in loss_map:
            raise ValueError(f'Loss {loss} not supported')
        return loss_map[loss](outputs, targets, indices, num_boxes)

    def forward(self, outputs, targets):
        outputs_without_aux = {k: v for (k, v) in outputs.items() if k != 'auxiliary_outputs' and k != 'enc_outputs'}
        indices = self.matcher(outputs_without_aux, targets)
        num_boxes = sum((len(t['class_labels']) for t in targets))
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
        world_size = 1
        if is_accelerate_available():
            if PartialState._shared_state != {}:
                num_boxes = reduce(num_boxes)
                world_size = PartialState().num_processes
        num_boxes = torch.clamp(num_boxes / world_size, min=1).item()
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
        if 'auxiliary_outputs' in outputs:
            for (i, auxiliary_outputs) in enumerate(outputs['auxiliary_outputs']):
                indices = self.matcher(auxiliary_outputs, targets)
                for loss in self.losses:
                    l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
                    l_dict = {k + f'_{i}': v for (k, v) in l_dict.items()}
                    losses.update(l_dict)
        if 'enc_outputs' in outputs:
            enc_outputs = outputs['enc_outputs']
            bin_targets = copy.deepcopy(targets)
            for bt in bin_targets:
                bt['class_labels'] = torch.zeros_like(bt['class_labels'])
            indices = self.matcher(enc_outputs, bin_targets)
            for loss in self.losses:
                l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes)
                l_dict = {k + '_enc': v for (k, v) in l_dict.items()}
                losses.update(l_dict)
        return losses

class DeformableDetrMLPPredictionHead(nn.Module):

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList((nn.Linear(n, k) for (n, k) in zip([input_dim] + h, h + [output_dim])))

    def forward(self, x):
        for (i, layer) in enumerate(self.layers):
            x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x
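
# A minimal usage sketch of the prediction head above; the dimensions (256-d features,
# 300 queries, 3 layers, 4 box coordinates) are illustrative assumptions only.
#
# >>> head = DeformableDetrMLPPredictionHead(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
# >>> head(torch.randn(2, 300, 256)).shape
# torch.Size([2, 300, 4])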

class DeformableDetrHungarianMatcher(nn.Module):

    def __init__(self, class_cost: float=1, bbox_cost: float=1, giou_cost: float=1):
        super().__init__()
        requires_backends(self, ['scipy'])
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        if class_cost == 0 and bbox_cost == 0 and (giou_cost == 0):
            raise ValueError("All costs of the Matcher can't be 0")

    @torch.no_grad()
    def forward(self, outputs, targets):
        (batch_size, num_queries) = outputs['logits'].shape[:2]
        out_prob = outputs['logits'].flatten(0, 1).sigmoid()
        out_bbox = outputs['pred_boxes'].flatten(0, 1)
        target_ids = torch.cat([v['class_labels'] for v in targets])
        target_bbox = torch.cat([v['boxes'] for v in targets])
        alpha = 0.25
        gamma = 2.0
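        # Focal-style classification cost on sigmoid probabilities, mirroring the focal
        # loss used at training time: alpha balances positives/negatives, gamma
        # down-weights easy examples, and 1e-8 guards against log(0).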
        neg_cost_class = (1 - alpha) * out_prob ** gamma * -(1 - out_prob + 1e-08).log()
        pos_cost_class = alpha * (1 - out_prob) ** gamma * -(out_prob + 1e-08).log()
        class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids]
        bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
        giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
        sizes = [len(v['boxes']) for v in targets]
        indices = [linear_sum_assignment(c[i]) for (i, c) in enumerate(cost_matrix.split(sizes, -1))]
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for (i, j) in indices]

def _upcast(t: Tensor) -> Tensor:
    if t.is_floating_point():
        return t if t.dtype in (torch.float32, torch.float64) else t.float()
    else:
        return t if t.dtype in (torch.int32, torch.int64) else t.int()

def box_area(boxes: Tensor) -> Tensor:
    boxes = _upcast(boxes)
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

def box_iou(boxes1, boxes2):
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)
    left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2])
    right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])
    width_height = (right_bottom - left_top).clamp(min=0)
    inter = width_height[:, :, 0] * width_height[:, :, 1]
    union = area1[:, None] + area2 - inter
    iou = inter / union
    return (iou, union)

def generalized_box_iou(boxes1, boxes2):
    if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
        raise ValueError(f'boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}')
    if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
        raise ValueError(f'boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}')
    (iou, union) = box_iou(boxes1, boxes2)
    top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    width_height = (bottom_right - top_left).clamp(min=0)
    area = width_height[:, :, 0] * width_height[:, :, 1]
    return iou - (area - union) / area
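
# A minimal numeric sketch of `generalized_box_iou` on two hand-picked corner-format
# boxes: the IoU is 1/7 and the smallest enclosing box has area 9, so
# GIoU = 1/7 - (9 - 7)/9, roughly -0.079.
#
# >>> boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
# >>> boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
# >>> generalized_box_iou(boxes1, boxes2)  # 1x1 matrix with value ~ -0.0794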

def _max_by_axis(the_list):
    maxes = the_list[0]
    for sublist in the_list[1:]:
        for (index, item) in enumerate(sublist):
            maxes[index] = max(maxes[index], item)
    return maxes

class NestedTensor:

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        cast_tensor = self.tensors.to(device)
        mask = self.mask
        if mask is not None:
            cast_mask = mask.to(device)
        else:
            cast_mask = None
        return NestedTensor(cast_tensor, cast_mask)

    def decompose(self):
        return (self.tensors, self.mask)

    def __repr__(self):
        return str(self.tensors)

def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    if tensor_list[0].ndim == 3:
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        batch_shape = [len(tensor_list)] + max_size
        (batch_size, num_channels, height, width) = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device)
        for (img, pad_img, m) in zip(tensor_list, tensor, mask):
            pad_img[:img.shape[0], :img.shape[1], :img.shape[2]].copy_(img)
            m[:img.shape[1], :img.shape[2]] = False
    else:
        raise ValueError('Only 3-dimensional tensors are supported')
    return NestedTensor(tensor, mask)
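
# A minimal sketch of `nested_tensor_from_tensor_list` with two assumed image sizes:
# both images are zero-padded to the per-axis maximum (3, 6, 8), and `mask` is True at
# padded positions.
#
# >>> images = [torch.randn(3, 4, 8), torch.randn(3, 6, 5)]
# >>> nested = nested_tensor_from_tensor_list(images)
# >>> nested.tensors.shape, nested.mask.shape
# (torch.Size([2, 3, 6, 8]), torch.Size([2, 6, 8]))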

# File: transformers-main/src/transformers/models/deit/__init__.py
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available
_import_structure = {'configuration_deit': ['DeiTConfig', 'DeiTOnnxConfig']}
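# The optional vision, PyTorch and TensorFlow components below are only registered when
# their dependencies are importable; everything is then exposed lazily via _LazyModule.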
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_deit'] = ['DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_deit'] = ['TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_deit import DeiTConfig, DeiTOnnxConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/deit/configuration_deit.py
""""""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

class DeiTConfig(PretrainedConfig):
    model_type = 'deit'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride

class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])

    @property
    def atol_for_validation(self) -> float:
        return 0.0001
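
# A minimal usage sketch: the defaults above correspond to the DeiT-base architecture,
# where a 224x224 image with 16x16 patches yields 196 patch tokens (plus the [CLS] and
# distillation tokens handled by the model).
#
# >>> config = DeiTConfig()
# >>> (config.image_size // config.patch_size) ** 2
# 196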

# File: transformers-main/src/transformers/models/deit/convert_deit_timm_to_pytorch.py
""""""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight'))
        rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight'))
        rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias'))
        rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight'))
        rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight'))
        rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias'))
        rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight'))
        rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias'))
    rename_keys.extend([('cls_token', 'deit.embeddings.cls_token'), ('dist_token', 'deit.embeddings.distillation_token'), ('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 'deit.embeddings.position_embeddings')])
    if base_model:
        rename_keys.extend([('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias')])
        rename_keys = [(pair[0], pair[1][len('deit.'):]) if pair[1].startswith('deit.') else pair for pair in rename_keys]
    else:
        rename_keys.extend([('norm.weight', 'deit.layernorm.weight'), ('norm.bias', 'deit.layernorm.bias'), ('head.weight', 'cls_classifier.weight'), ('head.bias', 'cls_classifier.bias'), ('head_dist.weight', 'distillation_classifier.weight'), ('head_dist.bias', 'distillation_classifier.bias')])
    return rename_keys

def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'deit.'
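        # The timm checkpoint stores query/key/value as a single fused projection of
        # shape (3 * hidden_size, hidden_size); split it into separate q/k/v tensors.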
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[:config.hidden_size, :]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[:config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size:config.hidden_size * 2, :]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[config.hidden_size:config.hidden_size * 2]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size:, :]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size:]

def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val

def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im

@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    base_model = False
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    with open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r') as f:
        id2label = json.load(f)
    id2label = {int(k): v for (k, v) in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for (k, v) in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    if deit_name[9:].startswith('tiny'):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small'):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base'):
        pass
    elif deit_name[4:].startswith('large'):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for (src, dest) in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    size = int(256 / 224 * config.image_size)
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=0.001)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {deit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help="Name of the DeiT timm model you'd like to convert.")
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.')
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
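
# Example invocation of this conversion script; the output directory name is an
# arbitrary assumption.
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224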

# File: transformers-main/src/transformers/models/deit/feature_extraction_deit.py
""""""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)

class DeiTFeatureExtractor(DeiTImageProcessor):

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn('The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use DeiTImageProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs)

# File: transformers-main/src/transformers/models/deit/image_processing_deit.py
""""""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
if is_vision_available():
    import PIL
logger = logging.get_logger(__name__)

class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PIL.Image.BICUBIC, do_center_crop: bool=True, crop_size: Dict[str, int]=None, rescale_factor: Union[int, float]=1 / 255, do_rescale: bool=True, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if 'height' not in size or 'width' not in size:
            raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)

    @filter_out_non_signature_kwargs()
    def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample=None, do_center_crop: bool=None, crop_size: Dict[str, int]=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
        images = [to_numpy_array(image) for image in images]
        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(images[0])
        all_images = []
        for image in images:
            if do_resize:
                image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
            if do_center_crop:
                image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
            if do_rescale:
                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
            if do_normalize:
                image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
            all_images.append(image)
        images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
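
# A minimal usage sketch (the random image is an assumption used only to show shapes):
# with the defaults above, an image is resized to 256x256, center-cropped to 224x224,
# rescaled by 1/255 and normalized with the ImageNet-standard mean/std.
#
# >>> processor = DeiTImageProcessor()
# >>> image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
# >>> processor(images=image, return_tensors="np")["pixel_values"].shape
# (1, 3, 224, 224)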

# File: transformers-main/src/transformers/models/deit/modeling_deit.py
""""""
import collections.abc
import math
from dataclasses import dataclass
from typing import Optional, Set, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, MaskedImageModelingOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int
from .configuration_deit import DeiTConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'DeiTConfig'
_CHECKPOINT_FOR_DOC = 'facebook/deit-base-distilled-patch16-224'
_EXPECTED_OUTPUT_SHAPE = [1, 198, 768]
_IMAGE_CLASS_CHECKPOINT = 'facebook/deit-base-distilled-patch16-224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

class DeiTEmbeddings(nn.Module):

    def __init__(self, config: DeiTConfig, use_mask_token: bool=False) -> None:
        super().__init__()
        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
        self.patch_embeddings = DeiTPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 2, config.hidden_size))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.patch_size = config.patch_size

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        num_patches = embeddings.shape[1] - 2
        num_positions = self.position_embeddings.shape[1] - 2
        if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
            return self.position_embeddings
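        # The first two position embeddings belong to the [CLS] and distillation tokens;
        # only the patch position embeddings are interpolated to the new grid size.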
        class_and_dist_pos_embed = self.position_embeddings[:, :2]
        patch_pos_embed = self.position_embeddings[:, 2:]
        dim = embeddings.shape[-1]
        new_height = height // self.patch_size
        new_width = width // self.patch_size
        sqrt_num_positions = torch_int(num_positions ** 0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
        patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_and_dist_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> torch.Tensor:
        (_, _, height, width) = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values)
        (batch_size, seq_length, _) = embeddings.size()
        if bool_masked_pos is not None:
            mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
            mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        distillation_tokens = self.distillation_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, distillation_tokens, embeddings), dim=1)
        position_embedding = self.position_embeddings
        if interpolate_pos_encoding:
            position_embedding = self.interpolate_pos_encoding(embeddings, height, width)
        embeddings = embeddings + position_embedding
        embeddings = self.dropout(embeddings)
        return embeddings

class DeiTPatchEmbeddings(nn.Module):

    def __init__(self, config):
        super().__init__()
        (image_size, patch_size) = (config.image_size, config.patch_size)
        (num_channels, hidden_size) = (config.num_channels, config.hidden_size)
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        (batch_size, num_channels, height, width) = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        x = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return x

class DeiTSelfAttention(nn.Module):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
            raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs

class DeiTSdpaSelfAttention(DeiTSelfAttention):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)
        self.attention_probs_dropout_prob = config.attention_probs_dropout_prob

    def forward(self, hidden_states, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, head_mask, self.attention_probs_dropout_prob if self.training else 0.0, is_causal=False, scale=None)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        return (context_layer, None)

class DeiTSelfOutput(nn.Module):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states

class DeiTAttention(nn.Module):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.attention = DeiTSelfAttention(config)
        self.output = DeiTSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        if len(heads) == 0:
            return
        (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]
        return outputs

class DeiTSdpaAttention(DeiTAttention):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)
        self.attention = DeiTSdpaSelfAttention(config)

class DeiTIntermediate(nn.Module):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

class DeiTOutput(nn.Module):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states + input_tensor
        return hidden_states
DEIT_ATTENTION_CLASSES = {'eager': DeiTAttention, 'sdpa': DeiTSdpaAttention}

class DeiTLayer(nn.Module):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = DEIT_ATTENTION_CLASSES[config._attn_implementation](config)
        self.intermediate = DeiTIntermediate(config)
        self.output = DeiTOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_attention_outputs = self.attention(self.layernorm_before(hidden_states), head_mask, output_attentions=output_attentions)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]
        hidden_states = attention_output + hidden_states
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
        layer_output = self.output(layer_output, hidden_states)
        outputs = (layer_output,) + outputs
        return outputs

class DeiTEncoder(nn.Module):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([DeiTLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for (i, layer_module) in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, layer_head_mask, output_attentions)
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)

class DeiTPreTrainedModel(PreTrainedModel):
    config_class = DeiTConfig
    base_model_prefix = 'deit'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    _no_split_modules = ['DeiTLayer']
    _supports_sdpa = True

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.weight.dtype)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
DEIT_START_DOCSTRING = '\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`DeiTConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
DEIT_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`DeiTImageProcessor.__call__`] for details.\n\n        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n            Whether to interpolate the pre-trained position encodings.\n'

@add_start_docstrings('The bare DeiT Model transformer outputting raw hidden-states without any specific head on top.', DEIT_START_DOCSTRING)
class DeiTModel(DeiTPreTrainedModel):

    def __init__(self, config: DeiTConfig, add_pooling_layer: bool=True, use_mask_token: bool=False) -> None:
        super().__init__(config)
        self.config = config
        self.embeddings = DeiTEmbeddings(config, use_mask_token=use_mask_token)
        self.encoder = DeiTEncoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = DeiTPooler(config) if add_pooling_layer else None
        self.post_init()

    def get_input_embeddings(self) -> DeiTPatchEmbeddings:
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        for (layer, heads) in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype
        if pixel_values.dtype != expected_dtype:
            pixel_values = pixel_values.to(expected_dtype)
        embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding)
        encoder_outputs = self.encoder(embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
            return head_outputs + encoder_outputs[1:]
        return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)

class DeiTPooler(nn.Module):

    def __init__(self, config: DeiTConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
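
# A minimal inference sketch using the checkpoint referenced by `_CHECKPOINT_FOR_DOC`
# above; the COCO test image is the same one used by the conversion script. For a
# 224x224 input the last hidden state has shape (1, 198, 768): 196 patch tokens plus
# the [CLS] and distillation tokens.
#
# >>> import requests, torch
# >>> from PIL import Image
# >>> from transformers import AutoImageProcessor, DeiTModel
# >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
# >>> image = Image.open(requests.get(url, stream=True).raw)
# >>> processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
# >>> model = DeiTModel.from_pretrained("facebook/deit-base-distilled-patch16-224")
# >>> with torch.no_grad():
# ...     outputs = model(**processor(images=image, return_tensors="pt"))
# >>> outputs.last_hidden_state.shape
# torch.Size([1, 198, 768])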

@add_start_docstrings('DeiT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886).\n\n    \n\n    Note that we provide a script to pre-train this model on custom data in our [examples\n    directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).\n\n    \n    ', DEIT_START_DOCSTRING)
class DeiTForMaskedImageModeling(DeiTPreTrainedModel):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)
        self.deit = DeiTModel(config, add_pooling_layer=False, use_mask_token=True)
        self.decoder = nn.Sequential(nn.Conv2d(in_channels=config.hidden_size, out_channels=config.encoder_stride ** 2 * config.num_channels, kernel_size=1), nn.PixelShuffle(config.encoder_stride))
        self.post_init()

    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=MaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[tuple, MaskedImageModelingOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deit(pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding)
        sequence_output = outputs[0]
        sequence_output = sequence_output[:, 1:-1]
        (batch_size, sequence_length, num_channels) = sequence_output.shape
        height = width = int(sequence_length ** 0.5)
        sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
        reconstructed_pixel_values = self.decoder(sequence_output)
        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = bool_masked_pos.repeat_interleave(self.config.patch_size, 1).repeat_interleave(self.config.patch_size, 2).unsqueeze(1).contiguous()
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction='none')
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-05) / self.config.num_channels
        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[1:]
            return (masked_im_loss,) + output if masked_im_loss is not None else output
        return MaskedImageModelingOutput(loss=masked_im_loss, reconstruction=reconstructed_pixel_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
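
# A minimal sketch with a randomly initialized model and a random mask (both are
# assumptions): with the default config, 196 patch tokens are reconstructed back to
# 224x224 pixel values, and `bool_masked_pos` marks which patches were masked.
#
# >>> config = DeiTConfig()
# >>> model = DeiTForMaskedImageModeling(config)
# >>> pixel_values = torch.randn(1, 3, 224, 224)
# >>> bool_masked_pos = torch.randint(0, 2, (1, 196)).bool()
# >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
# >>> outputs.reconstruction.shape
# torch.Size([1, 3, 224, 224])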

@add_start_docstrings('\n    DeiT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n    the [CLS] token) e.g. for ImageNet.\n    ', DEIT_START_DOCSTRING)
class DeiTForImageClassification(DeiTPreTrainedModel):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.deit = DeiTModel(config, add_pooling_layer=False)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.post_init()

    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[tuple, ImageClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deit(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding)
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output[:, 0, :])
        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
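            # Infer the problem type once from the labels: num_labels == 1 means
            # regression, integer labels mean single-label classification, and anything
            # else is treated as multi-label classification.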
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

@dataclass
class DeiTForImageClassificationWithTeacherOutput(ModelOutput):
    logits: torch.FloatTensor = None
    cls_logits: torch.FloatTensor = None
    distillation_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

@add_start_docstrings('\n    DeiT Model transformer with image classification heads on top (a linear layer on top of the final hidden state of\n    the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.\n\n    .. warning::\n\n           This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet\n           supported.\n    ', DEIT_START_DOCSTRING)
class DeiTForImageClassificationWithTeacher(DeiTPreTrainedModel):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.deit = DeiTModel(config, add_pooling_layer=False)
        self.cls_classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.distillation_classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.post_init()

    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=DeiTForImageClassificationWithTeacherOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[tuple, DeiTForImageClassificationWithTeacherOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deit(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding)
        sequence_output = outputs[0]
        cls_logits = self.cls_classifier(sequence_output[:, 0, :])
        distillation_logits = self.distillation_classifier(sequence_output[:, 1, :])
        logits = (cls_logits + distillation_logits) / 2
        if not return_dict:
            output = (logits, cls_logits, distillation_logits) + outputs[1:]
            return output
        return DeiTForImageClassificationWithTeacherOutput(logits=logits, cls_logits=cls_logits, distillation_logits=distillation_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
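
# A minimal inference sketch using the checkpoint referenced by `_IMAGE_CLASS_CHECKPOINT`
# above; for the COCO cats image the expected top-1 label is the documented
# `_IMAGE_CLASS_EXPECTED_OUTPUT`.
#
# >>> import requests, torch
# >>> from PIL import Image
# >>> from transformers import AutoImageProcessor, DeiTForImageClassificationWithTeacher
# >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
# >>> image = Image.open(requests.get(url, stream=True).raw)
# >>> processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
# >>> model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
# >>> with torch.no_grad():
# ...     logits = model(**processor(images=image, return_tensors="pt")).logits
# >>> model.config.id2label[logits.argmax(-1).item()]
# 'tabby, tabby cat'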

# File: transformers-main/src/transformers/models/deit/modeling_tf_deit.py
""""""
from __future__ import annotations
import collections.abc
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFImageClassifierOutput, TFMaskedImageModelingOutput
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs
from ...tf_utils import shape_list, stable_softmax
from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_deit import DeiTConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'DeiTConfig'
_CHECKPOINT_FOR_DOC = 'facebook/deit-base-distilled-patch16-224'
_EXPECTED_OUTPUT_SHAPE = [1, 198, 768]
_IMAGE_CLASS_CHECKPOINT = 'facebook/deit-base-distilled-patch16-224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

@dataclass
class TFDeiTForImageClassificationWithTeacherOutput(ModelOutput):
    logits: tf.Tensor = None
    cls_logits: tf.Tensor = None
    distillation_logits: tf.Tensor = None
    hidden_states: Tuple[tf.Tensor] | None = None
    attentions: Tuple[tf.Tensor] | None = None

class TFDeiTEmbeddings(keras.layers.Layer):

    def __init__(self, config: DeiTConfig, use_mask_token: bool=False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.config = config
        self.use_mask_token = use_mask_token
        self.patch_embeddings = TFDeiTPatchEmbeddings(config=config, name='patch_embeddings')
        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name='dropout')

    def build(self, input_shape=None):
        self.cls_token = self.add_weight(shape=(1, 1, self.config.hidden_size), initializer=keras.initializers.zeros(), trainable=True, name='cls_token')
        self.distillation_token = self.add_weight(shape=(1, 1, self.config.hidden_size), initializer=keras.initializers.zeros(), trainable=True, name='distillation_token')
        self.mask_token = None
        if self.use_mask_token:
            self.mask_token = self.add_weight(shape=(1, 1, self.config.hidden_size), initializer=keras.initializers.zeros(), trainable=True, name='mask_token')
        num_patches = self.patch_embeddings.num_patches
        self.position_embeddings = self.add_weight(shape=(1, num_patches + 2, self.config.hidden_size), initializer=keras.initializers.zeros(), trainable=True, name='position_embeddings')
        if self.built:
            return
        self.built = True
        if getattr(self, 'patch_embeddings', None) is not None:
            with tf.name_scope(self.patch_embeddings.name):
                self.patch_embeddings.build(None)
        if getattr(self, 'dropout', None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)

    def interpolate_pos_encoding(self, embeddings: tf.Tensor, height: int, width: int) -> tf.Tensor:
        num_patches = embeddings.shape[1] - 2
        num_positions = self.position_embeddings.shape[1] - 2
        if num_patches == num_positions and height == width:
            return self.position_embeddings
        class_pos_embed = self.position_embeddings[:, 0, :]
        dist_pos_embed = self.position_embeddings[:, 1, :]
        patch_pos_embed = self.position_embeddings[:, 2:, :]
        dim = embeddings.shape[-1]
        h0 = height // self.config.patch_size
        w0 = width // self.config.patch_size
        (h0, w0) = (h0 + 0.1, w0 + 0.1)
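        # The small 0.1 offset guards against floating point rounding when the resized
        # grid dimensions are cast back to int below.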
        # tf.image.resize expects and returns channels-last tensors, so the grid of patch position
        # embeddings can be resized directly as (1, grid, grid, dim); an NCHW-style transpose here
        # would scramble the embedding channels before the final reshape.
        patch_pos_embed = tf.reshape(patch_pos_embed, (1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim))
        patch_pos_embed = tf.image.resize(patch_pos_embed, size=(int(h0), int(w0)), method='bicubic')
        patch_pos_embed = tf.reshape(patch_pos_embed, (1, -1, dim))
        return tf.concat([tf.expand_dims(class_pos_embed, axis=0), tf.expand_dims(dist_pos_embed, axis=0), patch_pos_embed], axis=1)

    def call(self, pixel_values: tf.Tensor, bool_masked_pos: tf.Tensor | None=None, training: bool=False, interpolate_pos_encoding: bool=False) -> tf.Tensor:
        (_, height, width, _) = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values)
        (batch_size, seq_length, _) = shape_list(embeddings)
        if bool_masked_pos is not None:
            mask_tokens = tf.tile(self.mask_token, [batch_size, seq_length, 1])
            mask = tf.expand_dims(bool_masked_pos, axis=-1)
            mask = tf.cast(mask, dtype=mask_tokens.dtype)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
        cls_tokens = tf.repeat(self.cls_token, repeats=batch_size, axis=0)
        distillation_tokens = tf.repeat(self.distillation_token, repeats=batch_size, axis=0)
        embeddings = tf.concat((cls_tokens, distillation_tokens, embeddings), axis=1)
        position_embedding = self.position_embeddings
        if interpolate_pos_encoding:
            position_embedding = self.interpolate_pos_encoding(embeddings, height, width)
        embeddings = embeddings + position_embedding
        embeddings = self.dropout(embeddings, training=training)
        return embeddings
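# Shape sketch (illustrative, not part of the original module): with the default DeiT base config
# (image_size=224, patch_size=16, hidden_size=768), TFDeiTEmbeddings prepends a [CLS] token and a
# distillation token to the (224 // 16) ** 2 = 196 patch embeddings, which is where the
# (1, 198, 768) sequence in `_EXPECTED_OUTPUT_SHAPE` above comes from.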

class TFDeiTPatchEmbeddings(keras.layers.Layer):

    def __init__(self, config: DeiTConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        (image_size, patch_size) = (config.image_size, config.patch_size)
        (num_channels, hidden_size) = (config.num_channels, config.hidden_size)
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.projection = keras.layers.Conv2D(hidden_size, kernel_size=patch_size, strides=patch_size, name='projection')

    def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
        (batch_size, height, width, num_channels) = shape_list(pixel_values)
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError('Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
        x = self.projection(pixel_values)
        (batch_size, height, width, num_channels) = shape_list(x)
        x = tf.reshape(x, (batch_size, height * width, num_channels))
        return x

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'projection', None) is not None:
            with tf.name_scope(self.projection.name):
                self.projection.build([None, None, None, self.num_channels])
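# Shape sketch (illustrative): the patch projection is a Conv2D whose kernel_size equals its strides,
# so a channels-last (batch, 224, 224, 3) input becomes (batch, 14, 14, hidden_size) and is then
# flattened to (batch, 196, hidden_size) in `call` above.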

class TFDeiTSelfAttention(keras.layers.Layer):

    def __init__(self, config: DeiTConfig, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
        self.query = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='query')
        self.key = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='key')
        self.value = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='value')
        self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
        self.config = config

    def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
        tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
        return tf.transpose(tensor, perm=[0, 2, 1, 3])

    def call(self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]:
        batch_size = shape_list(hidden_states)[0]
        mixed_query_layer = self.query(inputs=hidden_states)
        mixed_key_layer = self.key(inputs=hidden_states)
        mixed_value_layer = self.value(inputs=hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
        key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
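        # Scaled dot-product attention: softmax(Q·Kᵀ / sqrt(head_size)) · V, with dropout applied to the
        # attention probabilities and an optional per-head mask.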
        attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
        dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
        attention_scores = tf.divide(attention_scores, dk)
        attention_probs = stable_softmax(logits=attention_scores, axis=-1)
        attention_probs = self.dropout(inputs=attention_probs, training=training)
        if head_mask is not None:
            attention_probs = tf.multiply(attention_probs, head_mask)
        attention_output = tf.matmul(attention_probs, value_layer)
        attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
        attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
        outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'query', None) is not None:
            with tf.name_scope(self.query.name):
                self.query.build([None, None, self.config.hidden_size])
        if getattr(self, 'key', None) is not None:
            with tf.name_scope(self.key.name):
                self.key.build([None, None, self.config.hidden_size])
        if getattr(self, 'value', None) is not None:
            with tf.name_scope(self.value.name):
                self.value.build([None, None, self.config.hidden_size])

class TFDeiTSelfOutput(keras.layers.Layer):

    def __init__(self, config: DeiTConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.config = config

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])

class TFDeiTAttention(keras.layers.Layer):

    def __init__(self, config: DeiTConfig, **kwargs):
        super().__init__(**kwargs)
        self.self_attention = TFDeiTSelfAttention(config, name='attention')
        self.dense_output = TFDeiTSelfOutput(config, name='output')

    def prune_heads(self, heads):
        raise NotImplementedError

    def call(self, input_tensor: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]:
        self_outputs = self.self_attention(hidden_states=input_tensor, head_mask=head_mask, output_attentions=output_attentions, training=training)
        attention_output = self.dense_output(hidden_states=self_outputs[0], input_tensor=input_tensor, training=training)
        outputs = (attention_output,) + self_outputs[1:]
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'self_attention', None) is not None:
            with tf.name_scope(self.self_attention.name):
                self.self_attention.build(None)
        if getattr(self, 'dense_output', None) is not None:
            with tf.name_scope(self.dense_output.name):
                self.dense_output.build(None)

class TFDeiTIntermediate(keras.layers.Layer):

    def __init__(self, config: DeiTConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])

class TFDeiTOutput(keras.layers.Layer):

    def __init__(self, config: DeiTConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense')
        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.config = config

    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        hidden_states = hidden_states + input_tensor
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.intermediate_size])

class TFDeiTLayer(keras.layers.Layer):

    def __init__(self, config: DeiTConfig, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFDeiTAttention(config, name='attention')
        self.intermediate = TFDeiTIntermediate(config, name='intermediate')
        self.deit_output = TFDeiTOutput(config, name='output')
        self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm_before')
        self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm_after')
        self.config = config
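    # DeiT uses pre-norm transformer blocks: `layernorm_before` is applied ahead of self-attention and
    # `layernorm_after` ahead of the MLP, with residual connections added to the un-normalized inputs.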

    def call(self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]:
        attention_outputs = self.attention(input_tensor=self.layernorm_before(inputs=hidden_states, training=training), head_mask=head_mask, output_attentions=output_attentions, training=training)
        attention_output = attention_outputs[0]
        hidden_states = attention_output + hidden_states
        layer_output = self.layernorm_after(inputs=hidden_states, training=training)
        intermediate_output = self.intermediate(hidden_states=layer_output, training=training)
        layer_output = self.deit_output(hidden_states=intermediate_output, input_tensor=hidden_states, training=training)
        outputs = (layer_output,) + attention_outputs[1:]
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'attention', None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, 'intermediate', None) is not None:
            with tf.name_scope(self.intermediate.name):
                self.intermediate.build(None)
        if getattr(self, 'deit_output', None) is not None:
            with tf.name_scope(self.deit_output.name):
                self.deit_output.build(None)
        if getattr(self, 'layernorm_before', None) is not None:
            with tf.name_scope(self.layernorm_before.name):
                self.layernorm_before.build([None, None, self.config.hidden_size])
        if getattr(self, 'layernorm_after', None) is not None:
            with tf.name_scope(self.layernorm_after.name):
                self.layernorm_after.build([None, None, self.config.hidden_size])

class TFDeiTEncoder(keras.layers.Layer):

    def __init__(self, config: DeiTConfig, **kwargs):
        super().__init__(**kwargs)
        self.layer = [TFDeiTLayer(config, name=f'layer_._{i}') for i in range(config.num_hidden_layers)]

    def call(self, hidden_states: tf.Tensor, head_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for (i, layer_module) in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(hidden_states=hidden_states, head_mask=head_mask[i], output_attentions=output_attentions, training=training)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None))
        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'layer', None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)

@keras_serializable
class TFDeiTMainLayer(keras.layers.Layer):
    config_class = DeiTConfig

    def __init__(self, config: DeiTConfig, add_pooling_layer: bool=True, use_mask_token: bool=False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.config = config
        self.embeddings = TFDeiTEmbeddings(config, use_mask_token=use_mask_token, name='embeddings')
        self.encoder = TFDeiTEncoder(config, name='encoder')
        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm')
        self.pooler = TFDeiTPooler(config, name='pooler') if add_pooling_layer else None

    def get_input_embeddings(self) -> TFDeiTPatchEmbeddings:
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    def get_head_mask(self, head_mask):
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.config.num_hidden_layers
        return head_mask

    @unpack_inputs
    def call(self, pixel_values: tf.Tensor | None=None, bool_masked_pos: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor, ...]]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        pixel_values = tf.transpose(pixel_values, (0, 2, 3, 1))
        head_mask = self.get_head_mask(head_mask)
        embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, training=training, interpolate_pos_encoding=interpolate_pos_encoding)
        encoder_outputs = self.encoder(embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output, training=training)
        pooled_output = self.pooler(sequence_output, training=training) if self.pooler is not None else None
        if not return_dict:
            head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
            return head_outputs + encoder_outputs[1:]
        return TFBaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'embeddings', None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, 'encoder', None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, 'layernorm', None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, self.config.hidden_size])
        if getattr(self, 'pooler', None) is not None:
            with tf.name_scope(self.pooler.name):
                self.pooler.build(None)

class TFDeiTPreTrainedModel(TFPreTrainedModel):
    config_class = DeiTConfig
    base_model_prefix = 'deit'
    main_input_name = 'pixel_values'
DEIT_START_DOCSTRING = '\n    This model is a TensorFlow\n    [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer). Use it as a regular\n    TensorFlow Module and refer to the TensorFlow documentation for all matter related to general usage and behavior.\n\n    Parameters:\n        config ([`DeiTConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
DEIT_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`DeiTImageProcessor.__call__`] for details.\n\n        head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n            Whether to interpolate the pre-trained position encodings.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'

@add_start_docstrings('The bare DeiT Model transformer outputting raw hidden-states without any specific head on top.', DEIT_START_DOCSTRING)
class TFDeiTModel(TFDeiTPreTrainedModel):

    def __init__(self, config: DeiTConfig, add_pooling_layer: bool=True, use_mask_token: bool=False, **kwargs) -> None:
        super().__init__(config, **kwargs)
        self.deit = TFDeiTMainLayer(config, add_pooling_layer=add_pooling_layer, use_mask_token=use_mask_token, name='deit')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def call(self, pixel_values: tf.Tensor | None=None, bool_masked_pos: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False, training: bool=False) -> Union[Tuple, TFBaseModelOutputWithPooling]:
        outputs = self.deit(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, training=training)
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deit', None) is not None:
            with tf.name_scope(self.deit.name):
                self.deit.build(None)
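# A minimal usage sketch (illustrative, not part of the original module): feature extraction with the
# checkpoint referenced by `_CHECKPOINT_FOR_DOC`; the image URL is only an example input.
# from PIL import Image
# import requests
# from transformers import AutoImageProcessor, TFDeiTModel
#
# image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
# model = TFDeiTModel.from_pretrained("facebook/deit-base-distilled-patch16-224")
# inputs = image_processor(images=image, return_tensors="tf")
# outputs = model(**inputs)
# outputs.last_hidden_state.shape  # TensorShape([1, 198, 768])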

class TFDeiTPooler(keras.layers.Layer):

    def __init__(self, config: DeiTConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation='tanh', name='dense')
        self.config = config

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(inputs=first_token_tensor)
        return pooled_output

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'dense', None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])

class TFDeitPixelShuffle(keras.layers.Layer):

    def __init__(self, upscale_factor: int, **kwargs) -> None:
        super().__init__(**kwargs)
        if not isinstance(upscale_factor, int) or upscale_factor < 2:
            raise ValueError(f'upscale_factor must be an integer value >= 2, got {upscale_factor}')
        self.upscale_factor = upscale_factor

    def call(self, x: tf.Tensor) -> tf.Tensor:
        hidden_states = x
        (batch_size, _, _, num_input_channels) = shape_list(hidden_states)
        block_size_squared = self.upscale_factor ** 2
        output_depth = int(num_input_channels / block_size_squared)
        permutation = tf.constant([[i + j * block_size_squared for i in range(block_size_squared) for j in range(output_depth)]])
        hidden_states = tf.gather(params=hidden_states, indices=tf.tile(permutation, [batch_size, 1]), batch_dims=-1)
        hidden_states = tf.nn.depth_to_space(hidden_states, block_size=self.upscale_factor, data_format='NHWC')
        return hidden_states
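# Note (editorial): TFDeitPixelShuffle reorders the channel axis with `tf.gather` so that
# `tf.nn.depth_to_space` reproduces the channel grouping of the PyTorch `PixelShuffle` this port
# mirrors, turning a (batch, H, W, C * upscale_factor**2) tensor into
# (batch, H * upscale_factor, W * upscale_factor, C).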

class TFDeitDecoder(keras.layers.Layer):

    def __init__(self, config: DeiTConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.conv2d = keras.layers.Conv2D(filters=config.encoder_stride ** 2 * config.num_channels, kernel_size=1, name='0')
        self.pixel_shuffle = TFDeitPixelShuffle(config.encoder_stride, name='1')
        self.config = config

    def call(self, inputs: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_states = inputs
        hidden_states = self.conv2d(hidden_states)
        hidden_states = self.pixel_shuffle(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'conv2d', None) is not None:
            with tf.name_scope(self.conv2d.name):
                self.conv2d.build([None, None, None, self.config.hidden_size])
        if getattr(self, 'pixel_shuffle', None) is not None:
            with tf.name_scope(self.pixel_shuffle.name):
                self.pixel_shuffle.build(None)

@add_start_docstrings('DeiT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886).', DEIT_START_DOCSTRING)
class TFDeiTForMaskedImageModeling(TFDeiTPreTrainedModel):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)
        self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, use_mask_token=True, name='deit')
        self.decoder = TFDeitDecoder(config, name='decoder')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, pixel_values: tf.Tensor | None=None, bool_masked_pos: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False, training: bool=False) -> Union[tuple, TFMaskedImageModelingOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deit(pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, training=training)
        sequence_output = outputs[0]
        sequence_output = sequence_output[:, 1:-1]
        (batch_size, sequence_length, num_channels) = shape_list(sequence_output)
        height = width = int(sequence_length ** 0.5)
        sequence_output = tf.reshape(sequence_output, (batch_size, height, width, num_channels))
        reconstructed_pixel_values = self.decoder(sequence_output, training=training)
        reconstructed_pixel_values = tf.transpose(reconstructed_pixel_values, (0, 3, 1, 2))
        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = tf.reshape(bool_masked_pos, (-1, size, size))
            mask = tf.repeat(bool_masked_pos, self.config.patch_size, 1)
            mask = tf.repeat(mask, self.config.patch_size, 2)
            mask = tf.expand_dims(mask, 1)
            mask = tf.cast(mask, tf.float32)
            reconstruction_loss = keras.losses.mean_absolute_error(tf.transpose(pixel_values, (1, 2, 3, 0)), tf.transpose(reconstructed_pixel_values, (1, 2, 3, 0)))
            reconstruction_loss = tf.expand_dims(reconstruction_loss, 0)
            total_loss = tf.reduce_sum(reconstruction_loss * mask)
            num_masked_pixels = (tf.reduce_sum(mask) + 1e-05) * self.config.num_channels
            masked_im_loss = total_loss / num_masked_pixels
            masked_im_loss = tf.reshape(masked_im_loss, (1,))
        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[1:]
            return (masked_im_loss,) + output if masked_im_loss is not None else output
        return TFMaskedImageModelingOutput(loss=masked_im_loss, reconstruction=reconstructed_pixel_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deit', None) is not None:
            with tf.name_scope(self.deit.name):
                self.deit.build(None)
        if getattr(self, 'decoder', None) is not None:
            with tf.name_scope(self.decoder.name):
                self.decoder.build(None)
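# A minimal usage sketch (illustrative, not part of the original module): masked image modeling with a
# random boolean patch mask; the image URL is only an example input.
# import tensorflow as tf
# from PIL import Image
# import requests
# from transformers import AutoImageProcessor, TFDeiTForMaskedImageModeling
#
# image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
# model = TFDeiTForMaskedImageModeling.from_pretrained("facebook/deit-base-distilled-patch16-224")
# num_patches = (model.config.image_size // model.config.patch_size) ** 2  # 196
# inputs = image_processor(images=image, return_tensors="tf")
# bool_masked_pos = tf.cast(tf.random.uniform((1, num_patches)) > 0.5, tf.bool)
# outputs = model(**inputs, bool_masked_pos=bool_masked_pos)
# outputs.reconstruction.shape  # TensorShape([1, 3, 224, 224])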

@add_start_docstrings('\n    DeiT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n    the [CLS] token) e.g. for ImageNet.\n    ', DEIT_START_DOCSTRING)
class TFDeiTForImageClassification(TFDeiTPreTrainedModel, TFSequenceClassificationLoss):

    def __init__(self, config: DeiTConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, name='deit')
        self.classifier = keras.layers.Dense(config.num_labels, name='classifier') if config.num_labels > 0 else keras.layers.Activation('linear', name='classifier')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, pixel_values: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, labels: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False, training: bool=False) -> Union[tf.Tensor, TFImageClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deit(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, training=training)
        sequence_output = outputs[0]
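        # The classifier is applied to the final hidden state of the [CLS] token (position 0).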
        logits = self.classifier(sequence_output[:, 0, :])
        loss = None if labels is None else self.hf_compute_loss(labels, logits)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deit', None) is not None:
            with tf.name_scope(self.deit.name):
                self.deit.build(None)
        if getattr(self, 'classifier', None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.hidden_size])

@add_start_docstrings('\n    DeiT Model transformer with image classification heads on top (a linear layer on top of the final hidden state of\n    the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.\n\n    .. warning::\n\n            This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet\n            supported.\n    ', DEIT_START_DOCSTRING)
class TFDeiTForImageClassificationWithTeacher(TFDeiTPreTrainedModel):

    def __init__(self, config: DeiTConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.deit = TFDeiTMainLayer(config, add_pooling_layer=False, name='deit')
        self.cls_classifier = keras.layers.Dense(config.num_labels, name='cls_classifier') if config.num_labels > 0 else keras.layers.Activation('linear', name='cls_classifier')
        self.distillation_classifier = keras.layers.Dense(config.num_labels, name='distillation_classifier') if config.num_labels > 0 else keras.layers.Activation('linear', name='distillation_classifier')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFDeiTForImageClassificationWithTeacherOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def call(self, pixel_values: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False, training: bool=False) -> Union[tuple, TFDeiTForImageClassificationWithTeacherOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.deit(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, training=training)
        sequence_output = outputs[0]
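        # Two heads: one on the final [CLS] hidden state (index 0) and one on the distillation token
        # (index 1); at inference time their logits are simply averaged.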
        cls_logits = self.cls_classifier(sequence_output[:, 0, :])
        distillation_logits = self.distillation_classifier(sequence_output[:, 1, :])
        logits = (cls_logits + distillation_logits) / 2
        if not return_dict:
            output = (logits, cls_logits, distillation_logits) + outputs[1:]
            return output
        return TFDeiTForImageClassificationWithTeacherOutput(logits=logits, cls_logits=cls_logits, distillation_logits=distillation_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'deit', None) is not None:
            with tf.name_scope(self.deit.name):
                self.deit.build(None)
        if getattr(self, 'cls_classifier', None) is not None:
            with tf.name_scope(self.cls_classifier.name):
                self.cls_classifier.build([None, None, self.config.hidden_size])
        if getattr(self, 'distillation_classifier', None) is not None:
            with tf.name_scope(self.distillation_classifier.name):
                self.distillation_classifier.build([None, None, self.config.hidden_size])
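# A minimal usage sketch (illustrative, not part of the original module): image classification with the
# distilled checkpoint from `_IMAGE_CLASS_CHECKPOINT`; the image URL is only an example input.
# import tensorflow as tf
# from PIL import Image
# import requests
# from transformers import AutoImageProcessor, TFDeiTForImageClassificationWithTeacher
#
# image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
# model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
# inputs = image_processor(images=image, return_tensors="tf")
# logits = model(**inputs).logits
# predicted_class_idx = int(tf.math.argmax(logits, axis=-1)[0])
# print(model.config.id2label[predicted_class_idx])  # expected: "tabby, tabby cat" (see _IMAGE_CLASS_EXPECTED_OUTPUT)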

# File: transformers-main/src/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py
""""""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
    raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
    raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'The Nymphenburg Palace is a beautiful palace in Munich!'

def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    bort_4_8_768_1024_hparams = {'attention_cell': 'multi_head', 'num_layers': 4, 'units': 1024, 'hidden_size': 768, 'max_length': 512, 'num_heads': 8, 'scaled': True, 'dropout': 0.1, 'use_residual': True, 'embed_size': 1024, 'embed_dropout': 0.1, 'word_embed': None, 'layer_norm_eps': 1e-05, 'token_type_vocab_size': 2}
    predefined_args = bort_4_8_768_1024_hparams
    encoder = BERTEncoder(attention_cell=predefined_args['attention_cell'], num_layers=predefined_args['num_layers'], units=predefined_args['units'], hidden_size=predefined_args['hidden_size'], max_length=predefined_args['max_length'], num_heads=predefined_args['num_heads'], scaled=predefined_args['scaled'], dropout=predefined_args['dropout'], output_attention=False, output_all_encodings=False, use_residual=predefined_args['use_residual'], activation=predefined_args.get('activation', 'gelu'), layer_norm_eps=predefined_args.get('layer_norm_eps', None))
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
    gluon_cache_dir = os.path.join(get_home_dir(), 'models')
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(encoder, len(bort_vocab), units=predefined_args['units'], embed_size=predefined_args['embed_size'], embed_dropout=predefined_args['embed_dropout'], word_embed=predefined_args['word_embed'], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args['token_type_vocab_size'], use_classifier=False, use_decoder=False)
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    hf_bort_config_json = {'architectures': ['BertForMaskedLM'], 'attention_probs_dropout_prob': predefined_args['dropout'], 'hidden_act': 'gelu', 'hidden_dropout_prob': predefined_args['dropout'], 'hidden_size': predefined_args['embed_size'], 'initializer_range': 0.02, 'intermediate_size': predefined_args['hidden_size'], 'layer_norm_eps': predefined_args['layer_norm_eps'], 'max_position_embeddings': predefined_args['max_length'], 'model_type': 'bort', 'num_attention_heads': predefined_args['num_heads'], 'num_hidden_layers': predefined_args['num_layers'], 'pad_token_id': 1, 'type_vocab_size': 1, 'vocab_size': len(bort_vocab)}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param_name = gluon_param
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert shape_hf == shape_gluon, f'The gluon parameter {gluon_param_name} has shape {shape_gluon}, but the Transformers model expects shape {shape_hf}'
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(hf_bort_model.bert.embeddings.word_embeddings.weight, 'word_embed.0.weight')
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(hf_bort_model.bert.embeddings.position_embeddings.weight, 'encoder.position_weight')
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(hf_bort_model.bert.embeddings.LayerNorm.bias, 'encoder.layer_norm.beta')
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(hf_bort_model.bert.embeddings.LayerNorm.weight, 'encoder.layer_norm.gamma')
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(self_attn.key.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias')
        self_attn.key.weight.data = check_and_map_params(self_attn.key.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight')
        self_attn.query.bias.data = check_and_map_params(self_attn.query.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias')
        self_attn.query.weight.data = check_and_map_params(self_attn.query.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight')
        self_attn.value.bias.data = check_and_map_params(self_attn.value.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias')
        self_attn.value.weight.data = check_and_map_params(self_attn.value.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight')
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(self_output.dense.bias, f'encoder.transformer_cells.{i}.proj.bias')
        self_output.dense.weight = check_and_map_params(self_output.dense.weight, f'encoder.transformer_cells.{i}.proj.weight')
        self_output.LayerNorm.bias = check_and_map_params(self_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.layer_norm.beta')
        self_output.LayerNorm.weight = check_and_map_params(self_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.layer_norm.gamma')
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(intermediate.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_1.bias')
        intermediate.dense.weight = check_and_map_params(intermediate.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_1.weight')
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(bert_output.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_2.bias')
        bert_output.dense.weight = check_and_map_params(bert_output.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_2.weight')
        bert_output.LayerNorm.bias = check_and_map_params(bert_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.ffn.layer_norm.beta')
        bert_output.LayerNorm.weight = check_and_map_params(bert_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma')
    hf_bort_model.half()
    tokenizer = RobertaTokenizer.from_pretrained('FacebookAI/roberta-base')
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)['input_ids']
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='pt')
    output_hf = hf_bort_model(**input_ids)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=0.001)
    if success:
        print('✔️ Both models output the same tensors')
    else:
        print('❌ The models do **NOT** output the same tensors')
        print('Absolute difference is:', max_absolute_diff)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
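# Example invocation (illustrative; the paths are placeholders):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort_params.params \
#       --pytorch_dump_folder_path /path/to/output_dir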

# File: transformers-main/src/transformers/models/deprecated/deta/__init__.py
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_deta': ['DetaConfig']}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_deta'] = ['DetaImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_deta'] = ['DetaForObjectDetection', 'DetaModel', 'DetaPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_deta import DetaConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_deta import DetaImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deta import DetaForObjectDetection, DetaModel, DetaPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/deprecated/deta/configuration_deta.py
""""""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
from ...auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

class DetaConfig(PretrainedConfig):
    model_type = 'deta'
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads'}

    def __init__(self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type='sine', num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, assign_second_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=True, **kwargs):
        if use_pretrained_backbone:
            raise ValueError('Pretrained backbones are not supported yet.')
        if backbone_config is not None and backbone is not None:
            raise ValueError("You can't specify both `backbone` and `backbone_config`.")
        if backbone_config is None and backbone is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        if backbone_kwargs is not None and backbone_kwargs and (backbone_config is not None):
            raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
        self.backbone_config = backbone_config
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = use_timm_backbone
        self.backbone_kwargs = backbone_kwargs
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        self.assign_second_stage = assign_second_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
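# A small sketch (illustrative, not part of the original module) of how `attribute_map` and the
# properties above expose the generic config names: `hidden_size` resolves to `d_model` and
# `num_attention_heads` to `encoder_attention_heads`.
# config = DetaConfig()
# assert config.hidden_size == config.d_model == 256
# assert config.num_attention_heads == config.encoder_attention_heads == 8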

# File: transformers-main/src/transformers/models/deprecated/deta/convert_deta_resnet_to_pytorch.py
""""""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

def get_deta_config():
    config = DetaConfig(num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True)
    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type='dataset')).read_text())
    id2label = {int(k): v for (k, v) in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for (k, v) in id2label.items()}
    return config

def create_rename_keys(config):
    rename_keys = []
    rename_keys.append(('backbone.0.body.conv1.weight', 'model.backbone.model.embedder.embedder.convolution.weight'))
    rename_keys.append(('backbone.0.body.bn1.weight', 'model.backbone.model.embedder.embedder.normalization.weight'))
    rename_keys.append(('backbone.0.body.bn1.bias', 'model.backbone.model.embedder.embedder.normalization.bias'))
    rename_keys.append(('backbone.0.body.bn1.running_mean', 'model.backbone.model.embedder.embedder.normalization.running_mean'))
    rename_keys.append(('backbone.0.body.bn1.running_var', 'model.backbone.model.embedder.embedder.normalization.running_var'))
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            if layer_idx == 0:
                rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight', f'model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight'))
                rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight', f'model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight'))
                rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias', f'model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias'))
                rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean', f'model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean'))
                rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var', f'model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var'))
            for i in range(3):
                rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i + 1}.weight', f'model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight'))
                rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.weight', f'model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight'))
                rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.bias', f'model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias'))
                rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.running_mean', f'model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean'))
                rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.running_var', f'model.backbone.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var'))
    for i in range(config.encoder_layers):
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias'))
    for i in range(config.decoder_layers):
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias'))
    return rename_keys

def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val

def read_in_decoder_q_k_v(state_dict, config):
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size:hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size:hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
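
# The split above turns each fused `in_proj` weight/bias of the decoder
# self-attention into separate q/k/v projections. A minimal sanity-check sketch
# (the helper `_check_decoder_qkv_split` is illustrative and not part of the
# original script): every resulting projection is square with side `config.d_model`.
def _check_decoder_qkv_split(state_dict, config):
    for i in range(config.decoder_layers):
        for proj in ('q_proj', 'k_proj', 'v_proj'):
            weight = state_dict[f'model.decoder.layers.{i}.self_attn.{proj}.weight']
            bias = state_dict[f'model.decoder.layers.{i}.self_attn.{proj}.bias']
            assert weight.shape == (config.d_model, config.d_model)
            assert bias.shape == (config.d_model,)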

def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im

@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config()
    if model_name == 'deta-resnet-50':
        filename = 'adet_checkpoint0011.pth'
    elif model_name == 'deta-resnet-50-24-epochs':
        filename = 'adet_2x_checkpoint0023.pth'
    else:
        raise ValueError(f'Model name {model_name} not supported')
    checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename=filename)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    rename_keys = create_rename_keys(config)
    for (src, dest) in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_decoder_q_k_v(state_dict, config)
    for key in state_dict.copy().keys():
        if 'transformer.decoder.class_embed' in key or 'transformer.decoder.bbox_embed' in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer.decoder', 'model.decoder')] = val
        if 'input_proj' in key:
            val = state_dict.pop(key)
            state_dict['model.' + key] = val
        if 'level_embed' in key or 'pos_trans' in key or 'pix_trans' in key or ('enc_output' in key):
            val = state_dict.pop(key)
            state_dict[key.replace('transformer', 'model')] = val
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    processor = DetaImageProcessor(format='coco_detection')
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
    if model_name == 'deta-resnet-50':
        expected_logits = torch.tensor([[-7.3978, -2.5406, -4.1668], [-8.2684, -3.9933, -3.8096], [-7.0515, -3.7973, -5.8516]])
        expected_boxes = torch.tensor([[0.5043, 0.4973, 0.9998], [0.2542, 0.5489, 0.4748], [0.549, 0.2765, 0.057]])
    elif model_name == 'deta-resnet-50-24-epochs':
        expected_logits = torch.tensor([[-7.1688, -2.4857, -4.8669], [-7.863, -3.8154, -4.2674], [-7.273, -4.1865, -5.5323]])
        expected_boxes = torch.tensor([[0.5021, 0.4971, 0.9994], [0.2546, 0.5486, 0.4731], [0.1686, 0.1986, 0.2142]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=0.0001)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=0.0001)
    print('Everything ok!')
    if pytorch_dump_folder_path:
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
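
# A minimal programmatic usage sketch (illustrative, not part of the original
# script; the output path is a placeholder): convert a checkpoint and save it
# locally without pushing to the Hub.
def _example_convert_deta_resnet():
    convert_deta_checkpoint('deta-resnet-50', './deta-resnet-50', push_to_hub=False)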
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='deta-resnet-50', choices=['deta-resnet-50', 'deta-resnet-50-24-epochs'], help="Name of the model you'd like to convert.")
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.')
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.')
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)

# File: transformers-main/src/transformers/models/deprecated/deta/convert_deta_swin_to_pytorch.py
""""""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

def get_deta_config(model_name):
    backbone_config = SwinConfig(embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=['stage2', 'stage3', 'stage4'])
    config = DetaConfig(backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True)
    repo_id = 'huggingface/label-files'
    if 'o365' in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type='dataset')).read_text())
    id2label = {int(k): v for (k, v) in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for (k, v) in id2label.items()}
    return config

def create_rename_keys(config):
    rename_keys = []
    rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight'))
    rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias'))
    rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight'))
    rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias'))
    # Swin backbone stages: each block's layer norms, attention and MLP weights
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight'))
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias'))
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table'))
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index'))
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight'))
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias'))
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight'))
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias'))
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight'))
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias'))
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight'))
            rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias'))
        if i < 3:  # the final stage has no patch merging (downsample) layer
            rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight'))
            rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight'))
            rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias'))
    # layer norms applied to the feature maps returned for stages 2-4
    rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight'))
    rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias'))
    rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight'))
    rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias'))
    rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight'))
    rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias'))
    for i in range(config.encoder_layers):
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias'))
        rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight'))
        rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias'))
    for i in range(config.decoder_layers):
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight'))
        rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias'))
    return rename_keys

def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val

def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2 ** i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim:dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim:dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
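
# Worked example of the per-stage dimensions assumed by the split above, for the
# Swin-L backbone configured in get_deta_config (embed_dim=192, four stages).
# Illustrative helper, not part of the original script.
def _example_swin_stage_dims():
    dims = [192 * 2 ** i for i in range(4)]
    assert dims == [192, 384, 768, 1536]
    # a block in stage i therefore has a fused qkv weight of shape (3 * dims[i], dims[i])
    return dims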

def read_in_decoder_q_k_v(state_dict, config):
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size:hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size:hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]

def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im

@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    if model_name == 'deta-swin-large':
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == 'deta-swin-large-o365':
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f'Model name {model_name} not supported')
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    for (name, param) in state_dict.items():
        print(name, param.shape)
    rename_keys = create_rename_keys(config)
    for (src, dest) in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    for key in state_dict.copy().keys():
        if 'transformer.decoder.class_embed' in key or 'transformer.decoder.bbox_embed' in key:
            val = state_dict.pop(key)
            state_dict[key.replace('transformer.decoder', 'model.decoder')] = val
        if 'input_proj' in key:
            val = state_dict.pop(key)
            state_dict['model.' + key] = val
        if 'level_embed' in key or 'pos_trans' in key or 'pix_trans' in key or ('enc_output' in key):
            val = state_dict.pop(key)
            state_dict[key.replace('transformer', 'model')] = val
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    processor = DetaImageProcessor(format='coco_detection')
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
    print('Logits:', outputs.logits[0, :3, :3])
    print('Boxes:', outputs.pred_boxes[0, :3, :3])
    if model_name == 'deta-swin-large':
        expected_logits = torch.tensor([[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == 'deta-swin-large-o365':
        expected_logits = torch.tensor([[-8.0122, -3.572, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.661, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=0.0001)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=0.0001)
    print('Everything ok!')
    if pytorch_dump_folder_path:
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='deta-swin-large', choices=['deta-swin-large', 'deta-swin-large-o365'], help="Name of the model you'd like to convert.")
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.')
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.')
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)

# File: transformers-main/src/transformers/models/deprecated/deta/image_processing_deta.py
""""""
import pathlib
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ....feature_extraction_utils import BatchFeature
from ....image_processing_utils import BaseImageProcessor, get_size_dict
from ....image_transforms import PaddingMode, center_to_corners_format, corners_to_center_format, pad, rescale, resize, rgb_to_id, to_channel_dimension_format
from ....image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, AnnotationType, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_batched, is_scaled_image, to_numpy_array, valid_images, validate_annotations, validate_preprocess_arguments
from ....utils import is_flax_available, is_jax_tensor, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_torchvision_available, is_vision_available, logging
from ....utils.generic import TensorType
if is_torch_available():
    import torch
if is_torchvision_available():
    from torchvision.ops.boxes import batched_nms
if is_vision_available():
    import PIL
logger = logging.get_logger(__name__)
SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)

def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
    (height, width) = image_size
    raw_size = None
    if max_size is not None:
        min_original_size = float(min((height, width)))
        max_original_size = float(max((height, width)))
        if max_original_size / min_original_size * size > max_size:
            raw_size = max_size * min_original_size / max_original_size
            size = int(round(raw_size))
    if (height <= width and height == size) or (width <= height and width == size):
        (oh, ow) = (height, width)
    elif width < height:
        ow = size
        if max_size is not None and raw_size is not None:
            oh = int(raw_size * height / width)
        else:
            oh = int(size * height / width)
    else:
        oh = size
        if max_size is not None and raw_size is not None:
            ow = int(raw_size * width / height)
        else:
            ow = int(size * width / height)
    return (oh, ow)
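
# Worked example for get_size_with_aspect_ratio (illustrative numbers, not from
# the original file): the shortest edge is resized to `size` while the longest
# edge is capped at `max_size`.
def _example_get_size_with_aspect_ratio():
    # short side 480 -> 800, long side 640 -> int(800 * 640 / 480) == 1066
    assert get_size_with_aspect_ratio((480, 640), size=800, max_size=1333) == (800, 1066)
    # a tighter max_size first caps the request to 1000 * 480 / 640 == 750
    assert get_size_with_aspect_ratio((480, 640), size=800, max_size=1000) == (750, 1000)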

def get_resize_output_image_size(input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:
    image_size = get_image_size(input_image, input_data_format)
    if isinstance(size, (list, tuple)):
        return size
    return get_size_with_aspect_ratio(image_size, size, max_size)

def get_image_size_for_max_height_width(input_image: np.ndarray, max_height: int, max_width: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:
    image_size = get_image_size(input_image, input_data_format)
    (height, width) = image_size
    height_scale = max_height / height
    width_scale = max_width / width
    min_scale = min(height_scale, width_scale)
    new_height = int(height * min_scale)
    new_width = int(width * min_scale)
    return (new_height, new_width)

def get_numpy_to_framework_fn(arr) -> Callable:
    if isinstance(arr, np.ndarray):
        return np.array
    if is_tf_available() and is_tf_tensor(arr):
        import tensorflow as tf
        return tf.convert_to_tensor
    if is_torch_available() and is_torch_tensor(arr):
        import torch
        return torch.tensor
    if is_flax_available() and is_jax_tensor(arr):
        import jax.numpy as jnp
        return jnp.array
    raise ValueError(f'Cannot convert arrays of type {type(arr)}')

def safe_squeeze(arr: np.ndarray, axis: Optional[int]=None) -> np.ndarray:
    if axis is None:
        return arr.squeeze()
    try:
        return arr.squeeze(axis=axis)
    except ValueError:
        return arr

def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
    (image_height, image_width) = image_size
    norm_annotation = {}
    for (key, value) in annotation.items():
        if key == 'boxes':
            boxes = value
            boxes = corners_to_center_format(boxes)
            boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
            norm_annotation[key] = boxes
        else:
            norm_annotation[key] = value
    return norm_annotation
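
# Illustrative sketch for normalize_annotation (not part of the original file):
# corner boxes are converted to (center_x, center_y, width, height) and divided
# by (image_width, image_height, image_width, image_height).
def _example_normalize_annotation():
    annotation = {'boxes': np.asarray([[100.0, 50.0, 300.0, 250.0]], dtype=np.float32)}
    normalized = normalize_annotation(annotation, image_size=(480, 640))
    # center format is [200, 150, 200, 200]; normalized to approx [0.3125, 0.3125, 0.3125, 0.4167]
    return normalized['boxes']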

def max_across_indices(values: Iterable[Any]) -> List[Any]:
    return [max(values_i) for values_i in zip(*values)]

def get_max_height_width(images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> List[int]:
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(images[0])
    if input_data_format == ChannelDimension.FIRST:
        (_, max_height, max_width) = max_across_indices([img.shape for img in images])
    elif input_data_format == ChannelDimension.LAST:
        (max_height, max_width, _) = max_across_indices([img.shape for img in images])
    else:
        raise ValueError(f'Invalid channel dimension format: {input_data_format}')
    return (max_height, max_width)

def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
    (input_height, input_width) = get_image_size(image, channel_dim=input_data_format)
    mask = np.zeros(output_size, dtype=np.int64)
    mask[:input_height, :input_width] = 1
    return mask

def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
    try:
        from pycocotools import mask as coco_mask
    except ImportError:
        raise ImportError('Pycocotools is not installed in your environment.')
    masks = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        mask = coco_mask.decode(rles)
        if len(mask.shape) < 3:
            mask = mask[..., None]
        mask = np.asarray(mask, dtype=np.uint8)
        mask = np.any(mask, axis=2)
        masks.append(mask)
    if masks:
        masks = np.stack(masks, axis=0)
    else:
        masks = np.zeros((0, height, width), dtype=np.uint8)
    return masks

def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool=False, input_data_format: Optional[Union[ChannelDimension, str]]=None):
    (image_height, image_width) = get_image_size(image, channel_dim=input_data_format)
    image_id = target['image_id']
    image_id = np.asarray([image_id], dtype=np.int64)
    annotations = target['annotations']
    annotations = [obj for obj in annotations if 'iscrowd' not in obj or obj['iscrowd'] == 0]
    classes = [obj['category_id'] for obj in annotations]
    classes = np.asarray(classes, dtype=np.int64)
    area = np.asarray([obj['area'] for obj in annotations], dtype=np.float32)
    iscrowd = np.asarray([obj['iscrowd'] if 'iscrowd' in obj else 0 for obj in annotations], dtype=np.int64)
    boxes = [obj['bbox'] for obj in annotations]
    boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
    boxes[:, 2:] += boxes[:, :2]
    boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
    boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
    keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
    new_target = {}
    new_target['image_id'] = image_id
    new_target['class_labels'] = classes[keep]
    new_target['boxes'] = boxes[keep]
    new_target['area'] = area[keep]
    new_target['iscrowd'] = iscrowd[keep]
    new_target['orig_size'] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
    if annotations and 'keypoints' in annotations[0]:
        keypoints = [obj['keypoints'] for obj in annotations]
        keypoints = np.asarray(keypoints, dtype=np.float32)
        keypoints = keypoints[keep]
        num_keypoints = keypoints.shape[0]
        keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
        new_target['keypoints'] = keypoints
    if return_segmentation_masks:
        segmentation_masks = [obj['segmentation'] for obj in annotations]
        masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
        new_target['masks'] = masks[keep]
    return new_target
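
# Note on the bbox handling above: COCO stores boxes as [x, y, width, height];
# adding the top-left corner to the last two entries yields [x0, y0, x1, y1],
# which is then clipped to the image and filtered to boxes with positive extent.
# Illustrative check, not part of the original file.
def _example_coco_bbox_to_corners():
    boxes = np.asarray([[10.0, 20.0, 30.0, 40.0]], dtype=np.float32)
    boxes[:, 2:] += boxes[:, :2]
    assert boxes.tolist() == [[10.0, 20.0, 40.0, 60.0]]
    return boxes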

def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
    if masks.size == 0:
        return np.zeros((0, 4))
    (h, w) = masks.shape[-2:]
    y = np.arange(0, h, dtype=np.float32)
    x = np.arange(0, w, dtype=np.float32)
    (y, x) = np.meshgrid(y, x, indexing='ij')
    x_mask = masks * np.expand_dims(x, axis=0)
    x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
    x = np.ma.array(x_mask, mask=~np.array(masks, dtype=bool))
    x_min = x.filled(fill_value=100000000.0)
    x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
    y_mask = masks * np.expand_dims(y, axis=0)
    y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
    y = np.ma.array(y_mask, mask=~np.array(masks, dtype=bool))
    y_min = y.filled(fill_value=100000000.0)
    y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
    return np.stack([x_min, y_min, x_max, y_max], 1)
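
# Illustrative check for masks_to_boxes (not part of the original file): a mask
# covering rows 2..4 and columns 3..6 yields [x_min, y_min, x_max, y_max] == [3, 2, 6, 4].
def _example_masks_to_boxes():
    mask = np.zeros((1, 8, 8), dtype=np.uint8)
    mask[0, 2:5, 3:7] = 1
    assert masks_to_boxes(mask).tolist() == [[3.0, 2.0, 6.0, 4.0]]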

def prepare_coco_panoptic_annotation(image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool=True, input_data_format: Union[ChannelDimension, str]=None) -> Dict:
    (image_height, image_width) = get_image_size(image, channel_dim=input_data_format)
    annotation_path = pathlib.Path(masks_path) / target['file_name']
    new_target = {}
    new_target['image_id'] = np.asarray([target['image_id'] if 'image_id' in target else target['id']], dtype=np.int64)
    new_target['size'] = np.asarray([image_height, image_width], dtype=np.int64)
    new_target['orig_size'] = np.asarray([image_height, image_width], dtype=np.int64)
    if 'segments_info' in target:
        masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
        masks = rgb_to_id(masks)
        ids = np.array([segment_info['id'] for segment_info in target['segments_info']])
        masks = masks == ids[:, None, None]
        masks = masks.astype(np.uint8)
        if return_masks:
            new_target['masks'] = masks
        new_target['boxes'] = masks_to_boxes(masks)
        new_target['class_labels'] = np.array([segment_info['category_id'] for segment_info in target['segments_info']], dtype=np.int64)
        new_target['iscrowd'] = np.asarray([segment_info['iscrowd'] for segment_info in target['segments_info']], dtype=np.int64)
        new_target['area'] = np.asarray([segment_info['area'] for segment_info in target['segments_info']], dtype=np.float32)
    return new_target

def resize_annotation(annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, resample: PILImageResampling=PILImageResampling.NEAREST):
    ratios = tuple((float(s) / float(s_orig) for (s, s_orig) in zip(target_size, orig_size)))
    (ratio_height, ratio_width) = ratios
    new_annotation = {}
    new_annotation['size'] = target_size
    for (key, value) in annotation.items():
        if key == 'boxes':
            boxes = value
            scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
            new_annotation['boxes'] = scaled_boxes
        elif key == 'area':
            area = value
            scaled_area = area * (ratio_width * ratio_height)
            new_annotation['area'] = scaled_area
        elif key == 'masks':
            masks = value[:, None]
            masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
            masks = masks.astype(np.float32)
            masks = masks[:, 0] > threshold
            new_annotation['masks'] = masks
        elif key == 'size':
            new_annotation['size'] = target_size
        else:
            new_annotation[key] = value
    return new_annotation

class DetaImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values', 'pixel_mask']

    def __init__(self, format: Union[str, AnnotationFormat]=AnnotationFormat.COCO_DETECTION, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Union[float, List[float]]=None, image_std: Union[float, List[float]]=None, do_convert_annotations: bool=True, do_pad: bool=True, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> None:
        if 'pad_and_return_pixel_mask' in kwargs:
            do_pad = kwargs.pop('pad_and_return_pixel_mask')
        size = size if size is not None else {'shortest_edge': 800, 'longest_edge': 1333}
        size = get_size_dict(size, default_to_square=False)
        if do_convert_annotations is None:
            do_convert_annotations = do_normalize
        super().__init__(**kwargs)
        self.format = format
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_convert_annotations = do_convert_annotations
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        self.do_pad = do_pad
        self.pad_size = pad_size

    def prepare_annotation(self, image: np.ndarray, target: Dict, format: Optional[AnnotationFormat]=None, return_segmentation_masks: bool=None, masks_path: Optional[Union[str, pathlib.Path]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Dict:
        format = format if format is not None else self.format
        if format == AnnotationFormat.COCO_DETECTION:
            return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
            target = prepare_coco_detection_annotation(image, target, return_segmentation_masks, input_data_format=input_data_format)
        elif format == AnnotationFormat.COCO_PANOPTIC:
            return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
            target = prepare_coco_panoptic_annotation(image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format)
        else:
            raise ValueError(f'Format {format} is not supported.')
        return target

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if 'shortest_edge' in size and 'longest_edge' in size:
            new_size = get_resize_output_image_size(image, size['shortest_edge'], size['longest_edge'], input_data_format=input_data_format)
        elif 'height' in size and 'width' in size:
            new_size = (size['height'], size['width'])
        elif 'max_height' in size and 'max_width' in size:
            new_size = get_image_size_for_max_height_width(image, size['max_height'], size['max_width'], input_data_format=input_data_format)
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys, 'shortest_edge' and 'longest_edge' keys, or 'max_height' and 'max_width' keys. Got {size.keys()}.")
        image = resize(image, size=new_size, resample=resample, data_format=data_format, input_data_format=input_data_format)
        return image

    def resize_annotation(self, annotation, orig_size, size, resample: PILImageResampling=PILImageResampling.NEAREST) -> Dict:
        return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)

    def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
        return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)

    def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
        return normalize_annotation(annotation, image_size=image_size)

    def _update_annotation_for_padded_image(self, annotation: Dict, input_image_size: Tuple[int, int], output_image_size: Tuple[int, int], padding, update_bboxes) -> Dict:
        new_annotation = {}
        new_annotation['size'] = output_image_size
        for (key, value) in annotation.items():
            if key == 'masks':
                masks = value
                masks = pad(masks, padding, mode=PaddingMode.CONSTANT, constant_values=0, input_data_format=ChannelDimension.FIRST)
                masks = safe_squeeze(masks, 1)
                new_annotation['masks'] = masks
            elif key == 'boxes' and update_bboxes:
                boxes = value
                boxes *= np.asarray([input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0], input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0]])
                new_annotation['boxes'] = boxes
            elif key == 'size':
                new_annotation['size'] = output_image_size
            else:
                new_annotation[key] = value
        return new_annotation

    def _pad_image(self, image: np.ndarray, output_size: Tuple[int, int], annotation: Optional[Dict[str, Any]]=None, constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True) -> np.ndarray:
        (input_height, input_width) = get_image_size(image, channel_dim=input_data_format)
        (output_height, output_width) = output_size
        pad_bottom = output_height - input_height
        pad_right = output_width - input_width
        padding = ((0, pad_bottom), (0, pad_right))
        padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
        if annotation is not None:
            annotation = self._update_annotation_for_padded_image(annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes)
        return (padded_image, annotation)

    def pad(self, images: List[np.ndarray], annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True, pad_size: Optional[Dict[str, int]]=None) -> BatchFeature:
        pad_size = pad_size if pad_size is not None else self.pad_size
        if pad_size is not None:
            padded_size = (pad_size['height'], pad_size['width'])
        else:
            padded_size = get_max_height_width(images, input_data_format=input_data_format)
        annotation_list = annotations if annotations is not None else [None] * len(images)
        padded_images = []
        padded_annotations = []
        for (image, annotation) in zip(images, annotation_list):
            (padded_image, padded_annotation) = self._pad_image(image, padded_size, annotation, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, update_bboxes=update_bboxes)
            padded_images.append(padded_image)
            padded_annotations.append(padded_annotation)
        data = {'pixel_values': padded_images}
        if return_pixel_mask:
            masks = [make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images]
            data['pixel_mask'] = masks
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        if annotations is not None:
            encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations]
        return encoded_inputs

    def preprocess(self, images: ImageInput, annotations: Optional[Union[List[Dict], List[List[Dict]]]]=None, return_segmentation_masks: bool=None, masks_path: Optional[Union[str, pathlib.Path]]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_annotations: Optional[bool]=None, do_pad: Optional[bool]=None, format: Optional[Union[str, AnnotationFormat]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> BatchFeature:
        if 'pad_and_return_pixel_mask' in kwargs:
            logger.warning_once('The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, use `do_pad` instead.')
            do_pad = kwargs.pop('pad_and_return_pixel_mask')
        do_resize = self.do_resize if do_resize is None else do_resize
        size = self.size if size is None else size
        size = get_size_dict(size=size, default_to_square=False)
        resample = self.resample if resample is None else resample
        do_rescale = self.do_rescale if do_rescale is None else do_rescale
        rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
        do_normalize = self.do_normalize if do_normalize is None else do_normalize
        image_mean = self.image_mean if image_mean is None else image_mean
        image_std = self.image_std if image_std is None else image_std
        do_convert_annotations = self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
        do_pad = self.do_pad if do_pad is None else do_pad
        pad_size = self.pad_size if pad_size is None else pad_size
        format = self.format if format is None else format
        validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
        if not is_batched(images):
            images = [images]
            annotations = [annotations] if annotations is not None else None
        if not valid_images(images):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        if annotations is not None and len(images) != len(annotations):
            raise ValueError(f'The number of images ({len(images)}) and annotations ({len(annotations)}) do not match.')
        format = AnnotationFormat(format)
        if annotations is not None:
            validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
        if masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and (not isinstance(masks_path, (pathlib.Path, str))):
            raise ValueError(f'The path to the directory containing the mask PNG files should be provided as a `pathlib.Path` or string object, but is {type(masks_path)} instead.')
        images = [to_numpy_array(image) for image in images]
        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(images[0])
        if annotations is not None:
            prepared_images = []
            prepared_annotations = []
            for (image, target) in zip(images, annotations):
                target = self.prepare_annotation(image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=input_data_format)
                prepared_images.append(image)
                prepared_annotations.append(target)
            images = prepared_images
            annotations = prepared_annotations
            del prepared_images, prepared_annotations
        if do_resize:
            if annotations is not None:
                (resized_images, resized_annotations) = ([], [])
                for (image, target) in zip(images, annotations):
                    orig_size = get_image_size(image, input_data_format)
                    resized_image = self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
                    resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image, input_data_format))
                    resized_images.append(resized_image)
                    resized_annotations.append(resized_annotation)
                images = resized_images
                annotations = resized_annotations
                del resized_images, resized_annotations
            else:
                images = [self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images]
        if do_convert_annotations and annotations is not None:
            annotations = [self.normalize_annotation(annotation, get_image_size(image, input_data_format)) for (annotation, image) in zip(annotations, images)]
        if do_pad:
            encoded_inputs = self.pad(images, annotations=annotations, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format, return_tensors=return_tensors, update_bboxes=do_convert_annotations, pad_size=pad_size)
        else:
            images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
            encoded_inputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
            if annotations is not None:
                encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations]
        return encoded_inputs

    def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, nms_threshold: float=0.7):
        (out_logits, out_bbox) = (outputs.logits, outputs.pred_boxes)
        (batch_size, num_queries, num_labels) = out_logits.shape
        if target_sizes is not None:
            if len(out_logits) != len(target_sizes):
                raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
        prob = out_logits.sigmoid()
        all_scores = prob.view(batch_size, num_queries * num_labels).to(out_logits.device)
        all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1).to(out_logits.device)
        all_boxes = torch.div(all_indexes, out_logits.shape[2], rounding_mode='floor')
        all_labels = all_indexes % out_logits.shape[2]
        boxes = center_to_corners_format(out_bbox)
        boxes = torch.gather(boxes, 1, all_boxes.unsqueeze(-1).repeat(1, 1, 4))
        if target_sizes is not None:
            if isinstance(target_sizes, List):
                img_h = torch.Tensor([i[0] for i in target_sizes])
                img_w = torch.Tensor([i[1] for i in target_sizes])
            else:
                (img_h, img_w) = target_sizes.unbind(1)
            scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
            boxes = boxes * scale_fct[:, None, :]
        results = []
        for b in range(batch_size):
            box = boxes[b]
            score = all_scores[b]
            lbls = all_labels[b]
            pre_topk = score.topk(min(10000, num_queries * num_labels)).indices
            box = box[pre_topk]
            score = score[pre_topk]
            lbls = lbls[pre_topk]
            keep_inds = batched_nms(box, score, lbls, nms_threshold)[:100]
            score = score[keep_inds]
            lbls = lbls[keep_inds]
            box = box[keep_inds]
            results.append({'scores': score[score > threshold], 'labels': lbls[score > threshold], 'boxes': box[score > threshold]})
        return results
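
# A minimal end-to-end usage sketch for the image processor above (illustrative;
# the dummy image, annotation values and the `model` argument are placeholders,
# not part of the original file).
def _example_deta_image_processor(model):
    processor = DetaImageProcessor(format='coco_detection')
    image = np.full((480, 640, 3), 128, dtype=np.uint8)
    annotation = {'image_id': 0, 'annotations': [{'bbox': [10.0, 20.0, 100.0, 50.0], 'category_id': 1, 'area': 5000.0, 'iscrowd': 0}]}
    encoding = processor(images=image, annotations=annotation, return_tensors='pt')
    outputs = model(pixel_values=encoding['pixel_values'], pixel_mask=encoding['pixel_mask'])
    # rescale predictions back to the original (height, width) and apply class-wise NMS
    return processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=[(480, 640)])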

# File: transformers-main/src/transformers/models/deprecated/deta/modeling_deta.py
""""""
import copy
import math
import os
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from ....activations import ACT2FN
from ....file_utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, is_torch_cuda_available, is_vision_available, replace_return_docstrings
from ....modeling_attn_mask_utils import _prepare_4d_attention_mask
from ....modeling_outputs import BaseModelOutput
from ....modeling_utils import PreTrainedModel
from ....pytorch_utils import meshgrid
from ....utils import is_accelerate_available, is_ninja_available, is_torchvision_available, logging, requires_backends
from ....utils.backbone_utils import load_backbone
from .configuration_deta import DetaConfig
logger = logging.get_logger(__name__)
# Will hold the compiled multi-scale deformable attention extension once load_cuda_kernels() has run.
MultiScaleDeformableAttention = None

def load_cuda_kernels():
    from torch.utils.cpp_extension import load
    global MultiScaleDeformableAttention
    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deta'
    src_files = [root / filename for filename in ['vision.cpp', os.path.join('cpu', 'ms_deform_attn_cpu.cpp'), os.path.join('cuda', 'ms_deform_attn_cuda.cu')]]
    load('MultiScaleDeformableAttention', src_files, with_cuda=True, extra_include_paths=[str(root)], extra_cflags=['-DWITH_CUDA=1'], extra_cuda_cflags=['-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__'])

class MultiScaleDeformableAttentionFunction(Function):

    @staticmethod
    def forward(context, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
        context.im2col_step = im2col_step
        output = MultiScaleDeformableAttention.ms_deform_attn_forward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, context.im2col_step)
        context.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
        return output

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        (value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights) = context.saved_tensors
        (grad_value, grad_sampling_loc, grad_attn_weight) = MultiScaleDeformableAttention.ms_deform_attn_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, context.im2col_step)
        return (grad_value, None, None, grad_sampling_loc, grad_attn_weight, None)
if is_accelerate_available():
    from accelerate import PartialState
    from accelerate.utils import reduce
if is_vision_available():
    from transformers.image_transforms import center_to_corners_format
if is_torchvision_available():
    from torchvision.ops.boxes import batched_nms
if is_scipy_available():
    from scipy.optimize import linear_sum_assignment
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'DetaConfig'
_CHECKPOINT_FOR_DOC = 'jozhang97/deta-swin-large-o365'

@dataclass
class DetaDecoderOutput(ModelOutput):
    last_hidden_state: torch.FloatTensor = None
    intermediate_hidden_states: torch.FloatTensor = None
    intermediate_reference_points: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None

@dataclass
class DetaModelOutput(ModelOutput):
    init_reference_points: torch.FloatTensor = None
    last_hidden_state: torch.FloatTensor = None
    intermediate_hidden_states: torch.FloatTensor = None
    intermediate_reference_points: torch.FloatTensor = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    enc_outputs_class: Optional[torch.FloatTensor] = None
    enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
    output_proposals: Optional[torch.FloatTensor] = None

@dataclass
class DetaObjectDetectionOutput(ModelOutput):
    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[Dict] = None
    logits: torch.FloatTensor = None
    pred_boxes: torch.FloatTensor = None
    auxiliary_outputs: Optional[List[Dict]] = None
    init_reference_points: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    intermediate_hidden_states: Optional[torch.FloatTensor] = None
    intermediate_reference_points: Optional[torch.FloatTensor] = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    enc_outputs_class: Optional[torch.FloatTensor] = None
    enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
    output_proposals: Optional[torch.FloatTensor] = None

def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])

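# Numerically stable inverse of the sigmoid (logit): inputs are clamped to [0, 1] and both numerator and denominator
# are clamped to at least `eps` before taking the log, which avoids infinities at the boundaries.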
def inverse_sigmoid(x, eps=1e-05):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)

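# BatchNorm2d variant whose affine parameters and running statistics are registered as fixed buffers, so they are
# never updated during training; the forward pass only rescales the input with the stored statistics.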
class DetaFrozenBatchNorm2d(nn.Module):

    def __init__(self, n):
        super().__init__()
        self.register_buffer('weight', torch.ones(n))
        self.register_buffer('bias', torch.zeros(n))
        self.register_buffer('running_mean', torch.zeros(n))
        self.register_buffer('running_var', torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        weight = self.weight.reshape(1, -1, 1, 1)
        bias = self.bias.reshape(1, -1, 1, 1)
        running_var = self.running_var.reshape(1, -1, 1, 1)
        running_mean = self.running_mean.reshape(1, -1, 1, 1)
        epsilon = 1e-05
        scale = weight * (running_var + epsilon).rsqrt()
        bias = bias - running_mean * scale
        return x * scale + bias

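# Recursively swap every nn.BatchNorm2d in the given module for DetaFrozenBatchNorm2d, copying the existing weights
# and running statistics unless the module still lives on the meta device.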
def replace_batch_norm(model):
    for (name, module) in model.named_children():
        if isinstance(module, nn.BatchNorm2d):
            new_module = DetaFrozenBatchNorm2d(module.num_features)
            if not module.weight.device == torch.device('meta'):
                new_module.weight.data.copy_(module.weight)
                new_module.bias.data.copy_(module.bias)
                new_module.running_mean.data.copy_(module.running_mean)
                new_module.running_var.data.copy_(module.running_var)
            model._modules[name] = new_module
        if len(list(module.children())) > 0:
            replace_batch_norm(module)

class DetaBackboneWithPositionalEncodings(nn.Module):

    def __init__(self, config):
        super().__init__()
        backbone = load_backbone(config)
        with torch.no_grad():
            replace_batch_norm(backbone)
        self.model = backbone
        self.intermediate_channel_sizes = self.model.channels
        if config.backbone_config.model_type == 'resnet':
            for (name, parameter) in self.model.named_parameters():
                if 'stages.1' not in name and 'stages.2' not in name and 'stages.3' not in name:
                    parameter.requires_grad_(False)
        self.position_embedding = build_position_encoding(config)

    def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
        features = self.model(pixel_values).feature_maps
        out = []
        pos = []
        for feature_map in features:
            mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
            position_embeddings = self.position_embedding(feature_map, mask).to(feature_map.dtype)
            out.append((feature_map, mask))
            pos.append(position_embeddings)
        return (out, pos)

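# 2D sine/cosine position embedding in the DETR style: cumulative sums over the pixel mask give per-axis coordinates,
# which are optionally normalized to [0, scale] and expanded with sin/cos at geometrically spaced frequencies.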
class DetaSinePositionEmbedding(nn.Module):

    def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError('normalize should be True if scale is passed')
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, pixel_values, pixel_mask):
        if pixel_mask is None:
            raise ValueError('No pixel mask provided')
        y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
        x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-06
            y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
        dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float()
        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.embedding_dim)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos

class DetaLearnedPositionEmbedding(nn.Module):

    def __init__(self, embedding_dim=256):
        super().__init__()
        self.row_embeddings = nn.Embedding(50, embedding_dim)
        self.column_embeddings = nn.Embedding(50, embedding_dim)

    def forward(self, pixel_values, pixel_mask=None):
        (height, width) = pixel_values.shape[-2:]
        width_values = torch.arange(width, device=pixel_values.device)
        height_values = torch.arange(height, device=pixel_values.device)
        x_emb = self.column_embeddings(width_values)
        y_emb = self.row_embeddings(height_values)
        pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
        pos = pos.permute(2, 0, 1)
        pos = pos.unsqueeze(0)
        pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
        return pos

def build_position_encoding(config):
    n_steps = config.d_model // 2
    if config.position_embedding_type == 'sine':
        position_embedding = DetaSinePositionEmbedding(n_steps, normalize=True)
    elif config.position_embedding_type == 'learned':
        position_embedding = DetaLearnedPositionEmbedding(n_steps)
    else:
        raise ValueError(f'Not supported {config.position_embedding_type}')
    return position_embedding

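# Pure-PyTorch fallback for multi-scale deformable attention, used when the custom CUDA kernel is unavailable or
# disabled. The flattened value tensor is split back into per-level feature maps, each map is sampled at the predicted
# sampling locations with bilinear grid_sample (locations are mapped from [0, 1] to the [-1, 1] range grid_sample
# expects), and the samples are combined with the attention weights into an output of shape
# (batch_size, num_queries, num_heads * hidden_dim).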
def multi_scale_deformable_attention(value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor) -> Tensor:
    (batch_size, _, num_heads, hidden_dim) = value.shape
    (_, num_queries, num_heads, num_levels, num_points, _) = sampling_locations.shape
    value_list = value.split([height.item() * width.item() for (height, width) in value_spatial_shapes], dim=1)
    sampling_grids = 2 * sampling_locations - 1
    sampling_value_list = []
    for (level_id, (height, width)) in enumerate(value_spatial_shapes):
        value_l_ = value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, width)
        sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1)
        sampling_value_l_ = nn.functional.grid_sample(value_l_, sampling_grid_l_, mode='bilinear', padding_mode='zeros', align_corners=False)
        sampling_value_list.append(sampling_value_l_)
    attention_weights = attention_weights.transpose(1, 2).reshape(batch_size * num_heads, 1, num_queries, num_levels * num_points)
    output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(batch_size, num_heads * hidden_dim, num_queries)
    return output.transpose(1, 2).contiguous()

class DetaMultiscaleDeformableAttention(nn.Module):

    def __init__(self, config: DetaConfig, num_heads: int, n_points: int):
        super().__init__()
        kernel_loaded = MultiScaleDeformableAttention is not None
        if is_torch_cuda_available() and is_ninja_available() and (not kernel_loaded):
            try:
                load_cuda_kernels()
            except Exception as e:
                logger.warning(f'Could not load the custom kernel for multi-scale deformable attention: {e}')
        if config.d_model % num_heads != 0:
            raise ValueError(f'embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}')
        dim_per_head = config.d_model // num_heads
        # check that the dimension of each attention head is a power of 2
        if not ((dim_per_head & (dim_per_head - 1)) == 0 and dim_per_head != 0):
            warnings.warn("It is recommended to set embed_dim (d_model) in DetaMultiscaleDeformableAttention so that the dimension of each attention head is a power of 2, which is more efficient in the authors' CUDA implementation.")
        self.im2col_step = 64
        self.d_model = config.d_model
        self.n_levels = config.num_feature_levels
        self.n_heads = num_heads
        self.n_points = n_points
        self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2)
        self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points)
        self.value_proj = nn.Linear(config.d_model, config.d_model)
        self.output_proj = nn.Linear(config.d_model, config.d_model)
        self.disable_custom_kernels = config.disable_custom_kernels
        self._reset_parameters()

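    # Initialization follows the Deformable DETR reference scheme: the sampling-offset bias is set to a distinct
    # direction per head, scaled by the point index, so that each head initially samples around the reference point in
    # a different direction; attention weights start at zero and the value/output projections use Xavier initialization.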
    def _reset_parameters(self):
        nn.init.constant_(self.sampling_offsets.weight.data, 0.0)
        default_dtype = torch.get_default_dtype()
        thetas = torch.arange(self.n_heads, dtype=torch.int64).to(default_dtype) * (2.0 * math.pi / self.n_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
        for i in range(self.n_points):
            grid_init[:, :, i, :] *= i + 1
        with torch.no_grad():
            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
        nn.init.constant_(self.attention_weights.weight.data, 0.0)
        nn.init.constant_(self.attention_weights.bias.data, 0.0)
        nn.init.xavier_uniform_(self.value_proj.weight.data)
        nn.init.constant_(self.value_proj.bias.data, 0.0)
        nn.init.xavier_uniform_(self.output_proj.weight.data)
        nn.init.constant_(self.output_proj.bias.data, 0.0)

    def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
        return tensor if position_embeddings is None else tensor + position_embeddings

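    # The forward pass projects the encoder hidden states to values, predicts per-query sampling offsets and attention
    # weights from the (position-augmented) queries, converts the offsets into absolute sampling locations relative to
    # the reference points, and then runs either the custom CUDA kernel or the pure-PyTorch fallback.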
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False):
        if position_embeddings is not None:
            hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
        (batch_size, num_queries, _) = hidden_states.shape
        (batch_size, sequence_length, _) = encoder_hidden_states.shape
        if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
            raise ValueError('Make sure to align the spatial shapes with the sequence length of the encoder hidden states')
        value = self.value_proj(encoder_hidden_states)
        if attention_mask is not None:
            value = value.masked_fill(~attention_mask[..., None], float(0))
        value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
        sampling_offsets = self.sampling_offsets(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2)
        attention_weights = self.attention_weights(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels * self.n_points)
        attention_weights = F.softmax(attention_weights, -1).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points)
        num_coordinates = reference_points.shape[-1]
        if num_coordinates == 2:
            offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
            sampling_locations = reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
        elif num_coordinates == 4:
            sampling_locations = reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
        else:
            raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}')
        if self.disable_custom_kernels:
            output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights)
        else:
            try:
                output = MultiScaleDeformableAttentionFunction.apply(value, spatial_shapes, level_start_index, sampling_locations, attention_weights, self.im2col_step)
            except Exception:
                output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights)
        output = self.output_proj(output)
        return (output, attention_weights)

class DetaMultiheadAttention(nn.Module):

    def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, bias: bool=True):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
        self.scaling = self.head_dim ** (-0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
        return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
        return tensor if position_embeddings is None else tensor + position_embeddings

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        (batch_size, target_len, embed_dim) = hidden_states.size()
        # keep the original hidden states for the value projection; position embeddings are only added to queries/keys
        hidden_states_original = hidden_states
        if position_embeddings is not None:
            hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
        query_states = self.q_proj(hidden_states) * self.scaling
        key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
        value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
        proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        source_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
            raise ValueError(f'Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is {attn_weights.size()}')
        if attention_mask is not None:
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
        if attention_mask is not None:
            if attention_mask.size() != (batch_size, 1, target_len, source_len):
                raise ValueError(f'Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is {attention_mask.size()}')
            attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
            attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if output_attentions:
            attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
            attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
            raise ValueError(f'`attn_output` should be of size {(batch_size * self.num_heads, target_len, self.head_dim)}, but is {attn_output.size()}')
        attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
        attn_output = self.out_proj(attn_output)
        return (attn_output, attn_weights_reshaped)

class DetaEncoderLayer(nn.Module):

    def __init__(self, config: DetaConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = DetaMultiscaleDeformableAttention(config, num_heads=config.encoder_attention_heads, n_points=config.encoder_n_points)
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False):
        residual = hidden_states
        (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        if self.training:
            if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
                clamp_value = torch.finfo(hidden_states.dtype).max - 1000
                hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs

class DetaDecoderLayer(nn.Module):

    def __init__(self, config: DetaConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = DetaMultiheadAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = DetaMultiscaleDeformableAttention(config, num_heads=config.decoder_attention_heads, n_points=config.decoder_n_points)
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
        residual = hidden_states
        (hidden_states, self_attn_weights) = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, output_attentions=output_attentions)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        second_residual = hidden_states
        cross_attn_weights = None
        (hidden_states, cross_attn_weights) = self.encoder_attn(hidden_states=hidden_states, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = second_residual + hidden_states
        hidden_states = self.encoder_attn_layer_norm(hidden_states)
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        return outputs

class DetaPreTrainedModel(PreTrainedModel):
    config_class = DetaConfig
    base_model_prefix = 'model'
    main_input_name = 'pixel_values'
    _no_split_modules = ['DetaBackboneWithPositionalEncodings', 'DetaEncoderLayer', 'DetaDecoderLayer']
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, DetaLearnedPositionEmbedding):
            nn.init.uniform_(module.row_embeddings.weight)
            nn.init.uniform_(module.column_embeddings.weight)
        elif isinstance(module, DetaMultiscaleDeformableAttention):
            module._reset_parameters()
        elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        if hasattr(module, 'reference_points') and (not self.config.two_stage):
            nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0)
            nn.init.constant_(module.reference_points.bias.data, 0.0)
        if hasattr(module, 'level_embed'):
            nn.init.normal_(module.level_embed)
DETA_START_DOCSTRING = '\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`DetaConfig`]):\n            Model configuration class with all the parameters of the model. Initializing with a config file does not\n            load the weights associated with the model, only the configuration. Check out the\n            [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
DETA_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it.\n\n            Pixel values can be obtained using [`AutoImageProcessor`]. See [`AutoImageProcessor.__call__`] for details.\n\n        pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):\n            Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n            - 1 for pixels that are real (i.e. **not masked**),\n            - 0 for pixels that are padding (i.e. **masked**).\n\n            [What are attention masks?](../glossary#attention-mask)\n\n        decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):\n            Not used by default. Can be used to mask object queries.\n        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you\n            can choose to directly pass a flattened representation of an image.\n        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):\n            Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an\n            embedded representation.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'

class DetaEncoder(DetaPreTrainedModel):

    def __init__(self, config: DetaConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layers = nn.ModuleList([DetaEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.gradient_checkpointing = False
        self.post_init()

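    # Build, for every feature level, a grid of reference points at pixel centers, normalized by the valid
    # (non-padded) height and width of each image so that padded regions fall outside the [0, 1] range.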
    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        reference_points_list = []
        for (level, (height, width)) in enumerate(spatial_shapes):
            (ref_y, ref_x) = meshgrid(torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), indexing='ij')
            ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height)
            ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width)
            ref = torch.stack((ref_x, ref_y), -1)
            reference_points_list.append(ref)
        reference_points = torch.cat(reference_points_list, 1)
        reference_points = reference_points[:, :, None] * valid_ratios[:, None]
        return reference_points

    def forward(self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        hidden_states = inputs_embeds
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device)
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for (i, encoder_layer) in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            layer_outputs = encoder_layer(hidden_states, attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)

class DetaDecoder(DetaPreTrainedModel):

    def __init__(self, config: DetaConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layers = nn.ModuleList([DetaDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.gradient_checkpointing = False
        self.bbox_embed = None
        self.class_embed = None
        self.post_init()

    def forward(self, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings=None, reference_points=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
        intermediate = ()
        intermediate_reference_points = ()
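        # Iterative bounding-box refinement: when `bbox_embed` is set (box refinement / two-stage), every decoder layer
        # predicts a delta that updates the reference points, and the refined points are detached before being fed to
        # the next layer. Hidden states and reference points of every layer are collected as intermediate outputs.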
        for (idx, decoder_layer) in enumerate(self.layers):
            if reference_points.shape[-1] == 4:
                reference_points_input = reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None]
            else:
                if reference_points.shape[-1] != 2:
                    raise ValueError("Reference points' last dimension must be of size 2")
                reference_points_input = reference_points[:, :, None] * valid_ratios[:, None]
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, position_embeddings, reference_points_input, spatial_shapes, level_start_index, encoder_hidden_states, encoder_attention_mask, output_attentions)
            else:
                layer_outputs = decoder_layer(hidden_states, position_embeddings=position_embeddings, encoder_hidden_states=encoder_hidden_states, reference_points=reference_points_input, spatial_shapes=spatial_shapes, level_start_index=level_start_index, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if self.bbox_embed is not None:
                tmp = self.bbox_embed[idx](hidden_states)
                if reference_points.shape[-1] == 4:
                    new_reference_points = tmp + inverse_sigmoid(reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                else:
                    if reference_points.shape[-1] != 2:
                        raise ValueError(f"Reference points' last dimension must be of size 2, but is {reference_points.shape[-1]}")
                    new_reference_points = tmp
                    new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                reference_points = new_reference_points.detach()
            intermediate += (hidden_states,)
            intermediate_reference_points += (reference_points,)
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        intermediate = torch.stack(intermediate, dim=1)
        intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1)
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, intermediate, intermediate_reference_points, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
        return DetaDecoderOutput(last_hidden_state=hidden_states, intermediate_hidden_states=intermediate, intermediate_reference_points=intermediate_reference_points, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)

@add_start_docstrings('\n    The bare DETA Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without\n    any specific head on top.\n    ', DETA_START_DOCSTRING)
class DetaModel(DetaPreTrainedModel):

    def __init__(self, config: DetaConfig):
        super().__init__(config)
        if config.two_stage:
            requires_backends(self, ['torchvision'])
        self.backbone = DetaBackboneWithPositionalEncodings(config)
        intermediate_channel_sizes = self.backbone.intermediate_channel_sizes
        if config.num_feature_levels > 1:
            num_backbone_outs = len(intermediate_channel_sizes)
            input_proj_list = []
            for level in range(num_backbone_outs):
                in_channels = intermediate_channel_sizes[level]
                input_proj_list.append(nn.Sequential(nn.Conv2d(in_channels, config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model)))
            for _ in range(config.num_feature_levels - num_backbone_outs):
                input_proj_list.append(nn.Sequential(nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, config.d_model)))
                in_channels = config.d_model
            self.input_proj = nn.ModuleList(input_proj_list)
        else:
            self.input_proj = nn.ModuleList([nn.Sequential(nn.Conv2d(intermediate_channel_sizes[-1], config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model))])
        if not config.two_stage:
            self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2)
        self.encoder = DetaEncoder(config)
        self.decoder = DetaDecoder(config)
        self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model))
        if config.two_stage:
            self.enc_output = nn.Linear(config.d_model, config.d_model)
            self.enc_output_norm = nn.LayerNorm(config.d_model)
            self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2)
            self.pos_trans_norm = nn.LayerNorm(config.d_model * 2)
            self.pix_trans = nn.Linear(config.d_model, config.d_model)
            self.pix_trans_norm = nn.LayerNorm(config.d_model)
        else:
            self.reference_points = nn.Linear(config.d_model, 2)
        self.assign_first_stage = config.assign_first_stage
        self.two_stage_num_proposals = config.two_stage_num_proposals
        self.post_init()

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def freeze_backbone(self):
        for (name, param) in self.backbone.model.named_parameters():
            param.requires_grad_(False)

    def unfreeze_backbone(self):
        for (name, param) in self.backbone.model.named_parameters():
            param.requires_grad_(True)

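    # Ratio of valid (unpadded) pixels along each spatial axis of the mask, used to scale reference points so they
    # stay inside the actual image area of each sample in the padded batch.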
    def get_valid_ratio(self, mask, dtype=torch.float32):
        (_, height, width) = mask.shape
        valid_height = torch.sum(mask[:, :, 0], 1)
        valid_width = torch.sum(mask[:, 0, :], 1)
        valid_ratio_height = valid_height.to(dtype) / height
        valid_ratio_width = valid_width.to(dtype) / width
        valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
        return valid_ratio

    def get_proposal_pos_embed(self, proposals):
        num_pos_feats = self.config.d_model // 2
        temperature = 10000
        scale = 2 * math.pi
        dim_t = torch.arange(num_pos_feats, dtype=torch.int64, device=proposals.device).float()
        dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats)
        proposals = proposals.sigmoid() * scale
        pos = proposals[:, :, :, None] / dim_t
        pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
        return pos

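    # Two-stage proposal generation: every encoder location gets an anchor-like proposal (grid center plus a
    # level-dependent width/height of 0.05 * 2**level), expressed as inverse-sigmoid logits. Proposals on padding or
    # too close to the border are masked with inf, and the corresponding encoder features are zeroed and re-projected
    # to serve as object queries for the first stage.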
    def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
        batch_size = enc_output.shape[0]
        proposals = []
        _cur = 0
        level_ids = []
        for (level, (height, width)) in enumerate(spatial_shapes):
            mask_flatten_ = padding_mask[:, _cur:_cur + height * width].view(batch_size, height, width, 1)
            valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
            valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
            (grid_y, grid_x) = meshgrid(torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), indexing='ij')
            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
            scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
            grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
            width_height = torch.ones_like(grid) * 0.05 * 2.0 ** level
            proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)
            proposals.append(proposal)
            _cur += height * width
            level_ids.append(grid.new_ones(height * width, dtype=torch.long) * level)
        output_proposals = torch.cat(proposals, 1)
        output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
        output_proposals = torch.log(output_proposals / (1 - output_proposals))
        output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float('inf'))
        output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
        object_query = enc_output
        object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
        object_query = object_query.masked_fill(~output_proposals_valid, float(0))
        object_query = self.enc_output_norm(self.enc_output(object_query))
        level_ids = torch.cat(level_ids)
        return (object_query, output_proposals, level_ids)

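    # Full DETA forward pass: backbone features and positional encodings are projected to d_model, flattened across
    # levels and encoded; the decoder queries are either learned embeddings (single-stage) or the top-k encoder
    # proposals (two-stage), and the decoder iteratively refines the reference points.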
    @add_start_docstrings_to_model_forward(DETA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=DetaModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], DetaModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        (batch_size, num_channels, height, width) = pixel_values.shape
        device = pixel_values.device
        if pixel_mask is None:
            pixel_mask = torch.ones((batch_size, height, width), dtype=torch.long, device=device)
        (features, position_embeddings_list) = self.backbone(pixel_values, pixel_mask)
        sources = []
        masks = []
        for (level, (source, mask)) in enumerate(features):
            sources.append(self.input_proj[level](source))
            masks.append(mask)
            if mask is None:
                raise ValueError('No attention mask was provided')
        if self.config.num_feature_levels > len(sources):
            _len_sources = len(sources)
            for level in range(_len_sources, self.config.num_feature_levels):
                if level == _len_sources:
                    source = self.input_proj[level](features[-1][0])
                else:
                    source = self.input_proj[level](sources[-1])
                mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0]
                pos_l = self.backbone.position_embedding(source, mask).to(source.dtype)
                sources.append(source)
                masks.append(mask)
                position_embeddings_list.append(pos_l)
        query_embeds = None
        if not self.config.two_stage:
            query_embeds = self.query_position_embeddings.weight
        spatial_shapes = [source.shape[2:] for source in sources]
        source_flatten = [source.flatten(2).transpose(1, 2) for source in sources]
        mask_flatten = [mask.flatten(1) for mask in masks]
        lvl_pos_embed_flatten = []
        for (level, pos_embed) in enumerate(position_embeddings_list):
            pos_embed = pos_embed.flatten(2).transpose(1, 2)
            lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
            lvl_pos_embed_flatten.append(lvl_pos_embed)
        source_flatten = torch.cat(source_flatten, 1)
        mask_flatten = torch.cat(mask_flatten, 1)
        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device)
        level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
        valid_ratios = valid_ratios.float()
        if encoder_outputs is None:
            encoder_outputs = self.encoder(inputs_embeds=source_flatten, attention_mask=mask_flatten, position_embeddings=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
            encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
        (batch_size, _, num_channels) = encoder_outputs[0].shape
        enc_outputs_class = None
        enc_outputs_coord_logits = None
        output_proposals = None
        if self.config.two_stage:
            (object_query_embedding, output_proposals, level_ids) = self.gen_encoder_output_proposals(encoder_outputs[0], ~mask_flatten, spatial_shapes)
            enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding)
            delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding)
            enc_outputs_coord_logits = delta_bbox + output_proposals
            topk = self.two_stage_num_proposals
            proposal_logit = enc_outputs_class[..., 0]
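            # First-stage selection: with `assign_first_stage`, the top pre-NMS proposals of every level are merged and
            # filtered with per-level NMS (IoU threshold 0.9), then at most `topk // num_levels` proposals are kept per
            # level (padded back to `topk` if needed); otherwise a plain top-k over the class logits is used.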
            if self.assign_first_stage:
                proposal_boxes = center_to_corners_format(enc_outputs_coord_logits.sigmoid().float()).clamp(0, 1)
                topk_proposals = []
                for b in range(batch_size):
                    prop_boxes_b = proposal_boxes[b]
                    prop_logits_b = proposal_logit[b]
                    pre_nms_topk = 1000
                    pre_nms_inds = []
                    for lvl in range(len(spatial_shapes)):
                        lvl_mask = level_ids == lvl
                        pre_nms_inds.append(torch.topk(prop_logits_b.sigmoid() * lvl_mask, pre_nms_topk)[1])
                    pre_nms_inds = torch.cat(pre_nms_inds)
                    post_nms_inds = batched_nms(prop_boxes_b[pre_nms_inds], prop_logits_b[pre_nms_inds], level_ids[pre_nms_inds], 0.9)
                    keep_inds = pre_nms_inds[post_nms_inds]
                    if len(keep_inds) < self.two_stage_num_proposals:
                        logger.warning(f'nms proposals ({len(keep_inds)}) < {self.two_stage_num_proposals}, running naive topk')
                        keep_inds = torch.topk(proposal_logit[b], topk)[1]
                    q_per_l = topk // len(spatial_shapes)
                    is_level_ordered = level_ids[keep_inds][None] == torch.arange(len(spatial_shapes), device=level_ids.device)[:, None]
                    keep_inds_mask = is_level_ordered & (is_level_ordered.cumsum(1) <= q_per_l)
                    keep_inds_mask = keep_inds_mask.any(0)
                    if keep_inds_mask.sum() < topk:
                        num_to_add = topk - keep_inds_mask.sum()
                        pad_inds = (~keep_inds_mask).nonzero()[:num_to_add]
                        keep_inds_mask[pad_inds] = True
                    keep_inds_topk = keep_inds[keep_inds_mask]
                    topk_proposals.append(keep_inds_topk)
                topk_proposals = torch.stack(topk_proposals)
            else:
                topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]
            topk_coords_logits = torch.gather(enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4))
            topk_coords_logits = topk_coords_logits.detach()
            reference_points = topk_coords_logits.sigmoid()
            init_reference_points = reference_points
            pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits)))
            (query_embed, target) = torch.split(pos_trans_out, num_channels, dim=2)
            topk_feats = torch.stack([object_query_embedding[b][topk_proposals[b]] for b in range(batch_size)]).detach()
            target = target + self.pix_trans_norm(self.pix_trans(topk_feats))
        else:
            (query_embed, target) = torch.split(query_embeds, num_channels, dim=1)
            query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1)
            target = target.unsqueeze(0).expand(batch_size, -1, -1)
            reference_points = self.reference_points(query_embed).sigmoid()
            init_reference_points = reference_points
        decoder_outputs = self.decoder(inputs_embeds=target, position_embeddings=query_embed, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=mask_flatten, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        if not return_dict:
            enc_outputs = tuple((value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None))
            tuple_outputs = (init_reference_points,) + decoder_outputs + encoder_outputs + enc_outputs
            return tuple_outputs
        return DetaModelOutput(init_reference_points=init_reference_points, last_hidden_state=decoder_outputs.last_hidden_state, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, intermediate_reference_points=decoder_outputs.intermediate_reference_points, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, enc_outputs_class=enc_outputs_class, enc_outputs_coord_logits=enc_outputs_coord_logits, output_proposals=output_proposals)

@add_start_docstrings('\n    DETA Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks\n    such as COCO detection.\n    ', DETA_START_DOCSTRING)
class DetaForObjectDetection(DetaPreTrainedModel):
    _tied_weights_keys = ['bbox_embed\\.\\d+', 'class_embed\\.\\d+']
    _no_split_modules = None

    def __init__(self, config: DetaConfig):
        super().__init__(config)
        self.model = DetaModel(config)
        self.class_embed = nn.Linear(config.d_model, config.num_labels)
        self.bbox_embed = DetaMLPPredictionHead(input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3)
        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value
        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
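        # With box refinement, every decoder layer gets its own deep-copied classification and box heads; otherwise a
        # single pair of heads is shared across layers. In the two-stage variant an extra head is reserved for the
        # encoder proposals and the class heads are also attached to the decoder.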
        num_pred = config.decoder_layers + 1 if config.two_stage else config.decoder_layers
        if config.with_box_refine:
            self.class_embed = _get_clones(self.class_embed, num_pred)
            self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
            nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
            self.model.decoder.bbox_embed = self.bbox_embed
        else:
            nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
            self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
            self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
            self.model.decoder.bbox_embed = None
        if config.two_stage:
            self.model.decoder.class_embed = self.class_embed
            for box_embed in self.bbox_embed:
                nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)
        self.post_init()

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        aux_loss = [{'logits': logits, 'pred_boxes': pred_boxes} for (logits, pred_boxes) in zip(outputs_class.transpose(0, 1)[:-1], outputs_coord.transpose(0, 1)[:-1])]
        return aux_loss

    @add_start_docstrings_to_model_forward(DETA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=DetaObjectDetectionOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[List[dict]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], DetaObjectDetectionOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.model(pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2]
        init_reference = outputs.init_reference_points if return_dict else outputs[0]
        inter_references = outputs.intermediate_reference_points if return_dict else outputs[3]
        outputs_classes = []
        outputs_coords = []
        for level in range(hidden_states.shape[1]):
            if level == 0:
                reference = init_reference
            else:
                reference = inter_references[:, level - 1]
            reference = inverse_sigmoid(reference)
            outputs_class = self.class_embed[level](hidden_states[:, level])
            delta_bbox = self.bbox_embed[level](hidden_states[:, level])
            if reference.shape[-1] == 4:
                outputs_coord_logits = delta_bbox + reference
            elif reference.shape[-1] == 2:
                delta_bbox[..., :2] += reference
                outputs_coord_logits = delta_bbox
            else:
                raise ValueError(f'reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}')
            outputs_coord = outputs_coord_logits.sigmoid()
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)
        outputs_class = torch.stack(outputs_classes, dim=1)
        outputs_coord = torch.stack(outputs_coords, dim=1)
        logits = outputs_class[:, -1]
        pred_boxes = outputs_coord[:, -1]
        (loss, loss_dict, auxiliary_outputs) = (None, None, None)
        if labels is not None:
            matcher = DetaHungarianMatcher(class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost)
            losses = ['labels', 'boxes', 'cardinality']
            criterion = DetaLoss(matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, num_queries=self.config.num_queries, assign_first_stage=self.config.assign_first_stage, assign_second_stage=self.config.assign_second_stage)
            criterion.to(logits.device)
            outputs_loss = {}
            outputs_loss['logits'] = logits
            outputs_loss['pred_boxes'] = pred_boxes
            outputs_loss['init_reference'] = init_reference
            if self.config.auxiliary_loss:
                auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord)
                outputs_loss['auxiliary_outputs'] = auxiliary_outputs
            if self.config.two_stage:
                enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid()
                outputs_loss['enc_outputs'] = {'logits': outputs.enc_outputs_class, 'pred_boxes': enc_outputs_coord, 'anchors': outputs.output_proposals.sigmoid()}
            loss_dict = criterion(outputs_loss, labels)
            weight_dict = {'loss_ce': 1, 'loss_bbox': self.config.bbox_loss_coefficient}
            weight_dict['loss_giou'] = self.config.giou_loss_coefficient
            if self.config.auxiliary_loss:
                aux_weight_dict = {}
                for i in range(self.config.decoder_layers - 1):
                    aux_weight_dict.update({k + f'_{i}': v for (k, v) in weight_dict.items()})
                aux_weight_dict.update({k + '_enc': v for (k, v) in weight_dict.items()})
                weight_dict.update(aux_weight_dict)
            loss = sum((loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict))
        if not return_dict:
            if auxiliary_outputs is not None:
                output = (logits, pred_boxes) + tuple(auxiliary_outputs) + outputs
            else:
                output = (logits, pred_boxes) + outputs
            tuple_outputs = (loss, loss_dict) + output if loss is not None else output
            return tuple_outputs
        dict_outputs = DetaObjectDetectionOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, intermediate_hidden_states=outputs.intermediate_hidden_states, intermediate_reference_points=outputs.intermediate_reference_points, init_reference_points=outputs.init_reference_points, enc_outputs_class=outputs.enc_outputs_class, enc_outputs_coord_logits=outputs.enc_outputs_coord_logits, output_proposals=outputs.output_proposals)
        return dict_outputs

def dice_loss(inputs, targets, num_boxes):
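    """
    Computes the DICE loss (similar to generalized IoU for masks): the inputs are raw logits, passed
    through a sigmoid, and the per-example loss 1 - (2 * |X ∩ Y| + 1) / (|X| + |Y| + 1) is summed and
    normalized by `num_boxes`.
    """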
    inputs = inputs.sigmoid()
    inputs = inputs.flatten(1)
    numerator = 2 * (inputs * targets).sum(1)
    denominator = inputs.sum(-1) + targets.sum(-1)
    loss = 1 - (numerator + 1) / (denominator + 1)
    return loss.sum() / num_boxes

def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float=0.25, gamma: float=2):
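    """
    Sigmoid focal loss, as introduced for dense object detection (RetinaNet): binary cross-entropy
    weighted by (1 - p_t) ** gamma with optional alpha-balancing, averaged over the query dimension
    and normalized by `num_boxes`.
    """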
    prob = inputs.sigmoid()
    ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
    p_t = prob * targets + (1 - prob) * (1 - targets)
    loss = ce_loss * (1 - p_t) ** gamma
    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss
    return loss.mean(1).sum() / num_boxes

class DetaLoss(nn.Module):
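    """
    Computes the loss for DETA object detection in two steps: (1) match predictions to ground-truth
    boxes, either with the Hungarian matcher or with the IoU-based stage-1/stage-2 assigners, and
    (2) compute the classification (focal), L1 box and generalized-IoU losses on the matched pairs,
    plus a cardinality error that is only logged.
    """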

    def __init__(self, matcher, num_classes, focal_alpha, losses, num_queries, assign_first_stage=False, assign_second_stage=False):
        super().__init__()
        self.matcher = matcher
        self.num_classes = num_classes
        self.focal_alpha = focal_alpha
        self.losses = losses
        self.assign_first_stage = assign_first_stage
        self.assign_second_stage = assign_second_stage
        if self.assign_first_stage:
            self.stg1_assigner = DetaStage1Assigner()
        if self.assign_second_stage:
            self.stg2_assigner = DetaStage2Assigner(num_queries)

    def loss_labels(self, outputs, targets, indices, num_boxes):
        if 'logits' not in outputs:
            raise KeyError('No logits were found in the outputs')
        source_logits = outputs['logits']
        idx = self._get_source_permutation_idx(indices)
        target_classes_o = torch.cat([t['class_labels'][J] for (t, (_, J)) in zip(targets, indices)])
        target_classes = torch.full(source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device)
        target_classes[idx] = target_classes_o
        target_classes_onehot = torch.zeros([source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device)
        target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
        target_classes_onehot = target_classes_onehot[:, :, :-1]
        loss_ce = sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1]
        losses = {'loss_ce': loss_ce}
        return losses

    @torch.no_grad()
    def loss_cardinality(self, outputs, targets, indices, num_boxes):
        logits = outputs['logits']
        device = logits.device
        target_lengths = torch.as_tensor([len(v['class_labels']) for v in targets], device=device)
        card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
        card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
        losses = {'cardinality_error': card_err}
        return losses

    def loss_boxes(self, outputs, targets, indices, num_boxes):
        if 'pred_boxes' not in outputs:
            raise KeyError('No predicted boxes found in outputs')
        idx = self._get_source_permutation_idx(indices)
        source_boxes = outputs['pred_boxes'][idx]
        target_boxes = torch.cat([t['boxes'][i] for (t, (_, i)) in zip(targets, indices)], dim=0)
        loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction='none')
        losses = {}
        losses['loss_bbox'] = loss_bbox.sum() / num_boxes
        loss_giou = 1 - torch.diag(generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)))
        losses['loss_giou'] = loss_giou.sum() / num_boxes
        return losses

    def _get_source_permutation_idx(self, indices):
        batch_idx = torch.cat([torch.full_like(source, i) for (i, (source, _)) in enumerate(indices)])
        source_idx = torch.cat([source for (source, _) in indices])
        return (batch_idx, source_idx)

    def _get_target_permutation_idx(self, indices):
        batch_idx = torch.cat([torch.full_like(target, i) for (i, (_, target)) in enumerate(indices)])
        target_idx = torch.cat([target for (_, target) in indices])
        return (batch_idx, target_idx)

    def get_loss(self, loss, outputs, targets, indices, num_boxes):
        loss_map = {'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes}
        if loss not in loss_map:
            raise ValueError(f'Loss {loss} not supported')
        return loss_map[loss](outputs, targets, indices, num_boxes)

    def forward(self, outputs, targets):
        outputs_without_aux = {k: v for (k, v) in outputs.items() if k not in ('auxiliary_outputs', 'enc_outputs')}
        if self.assign_second_stage:
            indices = self.stg2_assigner(outputs_without_aux, targets)
        else:
            indices = self.matcher(outputs_without_aux, targets)
        num_boxes = sum((len(t['class_labels']) for t in targets))
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
        world_size = 1
        if is_accelerate_available():
            if PartialState._shared_state != {}:
                num_boxes = reduce(num_boxes)
                world_size = PartialState().num_processes
        num_boxes = torch.clamp(num_boxes / world_size, min=1).item()
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
        if 'auxiliary_outputs' in outputs:
            for (i, auxiliary_outputs) in enumerate(outputs['auxiliary_outputs']):
                if not self.assign_second_stage:
                    indices = self.matcher(auxiliary_outputs, targets)
                for loss in self.losses:
                    l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
                    l_dict = {k + f'_{i}': v for (k, v) in l_dict.items()}
                    losses.update(l_dict)
        if 'enc_outputs' in outputs:
            enc_outputs = outputs['enc_outputs']
            bin_targets = copy.deepcopy(targets)
            for bt in bin_targets:
                bt['class_labels'] = torch.zeros_like(bt['class_labels'])
            if self.assign_first_stage:
                indices = self.stg1_assigner(enc_outputs, bin_targets)
            else:
                indices = self.matcher(enc_outputs, bin_targets)
            for loss in self.losses:
                l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes)
                l_dict = {k + '_enc': v for (k, v) in l_dict.items()}
                losses.update(l_dict)
        return losses

class DetaMLPPredictionHead(nn.Module):
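    """
    A simple multi-layer perceptron (FFN) with ReLU activations, used here to regress the normalized
    box coordinates (center_x, center_y, width, height) from the decoder hidden states.
    """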

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList((nn.Linear(n, k) for (n, k) in zip([input_dim] + h, h + [output_dim])))

    def forward(self, x):
        for (i, layer) in enumerate(self.layers):
            x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x

class DetaHungarianMatcher(nn.Module):
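    """
    Computes a one-to-one assignment between predictions and ground-truth targets by solving a linear
    sum assignment (via scipy) over a cost matrix that combines a focal classification cost, an L1 box
    cost and a generalized-IoU cost. Unmatched predictions are treated as "no object".
    """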

    def __init__(self, class_cost: float=1, bbox_cost: float=1, giou_cost: float=1):
        super().__init__()
        requires_backends(self, ['scipy'])
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        if class_cost == 0 and bbox_cost == 0 and (giou_cost == 0):
            raise ValueError("All costs of the Matcher can't be 0")

    @torch.no_grad()
    def forward(self, outputs, targets):
        (batch_size, num_queries) = outputs['logits'].shape[:2]
        out_prob = outputs['logits'].flatten(0, 1).sigmoid()
        out_bbox = outputs['pred_boxes'].flatten(0, 1)
        target_ids = torch.cat([v['class_labels'] for v in targets])
        target_bbox = torch.cat([v['boxes'] for v in targets])
        alpha = 0.25
        gamma = 2.0
        neg_cost_class = (1 - alpha) * out_prob ** gamma * -(1 - out_prob + 1e-08).log()
        pos_cost_class = alpha * (1 - out_prob) ** gamma * -(out_prob + 1e-08).log()
        class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids]
        bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
        giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
        sizes = [len(v['boxes']) for v in targets]
        indices = [linear_sum_assignment(c[i]) for (i, c) in enumerate(cost_matrix.split(sizes, -1))]
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for (i, j) in indices]

def _upcast(t: Tensor) -> Tensor:
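    """Protects from numerical overflow in multiplications by upcasting to an equivalent higher dtype."""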
    if t.is_floating_point():
        return t if t.dtype in (torch.float32, torch.float64) else t.float()
    else:
        return t if t.dtype in (torch.int32, torch.int64) else t.int()

def box_area(boxes: Tensor) -> Tensor:
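    """Computes the area of each box, expected in (x1, y1, x2, y2) corner format."""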
    boxes = _upcast(boxes)
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

def box_iou(boxes1, boxes2):
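    """Returns the pairwise IoU matrix and the union areas for two sets of boxes in corner format."""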
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)
    left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2])
    right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])
    width_height = (right_bottom - left_top).clamp(min=0)
    inter = width_height[:, :, 0] * width_height[:, :, 1]
    union = area1[:, None] + area2 - inter
    iou = inter / union
    return (iou, union)

def generalized_box_iou(boxes1, boxes2):
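    """
    Generalized IoU between two sets of boxes in (x1, y1, x2, y2) corner format, returned as an
    [N, M] pairwise matrix.

    A minimal usage sketch (hypothetical values):

    >>> import torch
    >>> boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
    >>> boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
    >>> giou = generalized_box_iou(boxes1, boxes2)  # IoU 1/7 minus enclosing-area penalty 2/9 ~= -0.079
    """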
    if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
        raise ValueError(f'boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}')
    if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
        raise ValueError(f'boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}')
    (iou, union) = box_iou(boxes1, boxes2)
    top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    width_height = (bottom_right - top_left).clamp(min=0)
    area = width_height[:, :, 0] * width_height[:, :, 1]
    return iou - (area - union) / area

def nonzero_tuple(x):
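    """An `as_tuple=True` version of `torch.nonzero` that also works under torch.jit scripting."""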
    if torch.jit.is_scripting():
        if x.dim() == 0:
            return x.unsqueeze(0).nonzero().unbind(1)
        return x.nonzero().unbind(1)
    else:
        return x.nonzero(as_tuple=True)

class DetaMatcher:
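    """
    Assigns each prediction (e.g. an anchor or a query) to at most one ground-truth box based on an
    (num_gt, num_pred) match-quality matrix such as pairwise IoU. The `thresholds` split the quality
    range into intervals and `labels` maps each interval to 1 (positive), 0 (negative) or -1 (ignore);
    `allow_low_quality_matches` additionally marks, for every ground truth, its best prediction as positive.
    """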

    def __init__(self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool=False):
        thresholds = thresholds[:]
        if thresholds[0] < 0:
            raise ValueError('Thresholds should be positive')
        thresholds.insert(0, -float('inf'))
        thresholds.append(float('inf'))
        if not all((low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:]))):
            raise ValueError('Thresholds should be sorted.')
        if not all((l in [-1, 0, 1] for l in labels)):
            raise ValueError('All labels should be either -1, 0 or 1')
        if len(labels) != len(thresholds) - 1:
            raise ValueError('Number of labels should be equal to number of thresholds - 1')
        self.thresholds = thresholds
        self.labels = labels
        self.allow_low_quality_matches = allow_low_quality_matches

    def __call__(self, match_quality_matrix):
        assert match_quality_matrix.dim() == 2
        if match_quality_matrix.numel() == 0:
            default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64)
            default_match_labels = match_quality_matrix.new_full((match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8)
            return (default_matches, default_match_labels)
        assert torch.all(match_quality_matrix >= 0)
        (matched_vals, matches) = match_quality_matrix.max(dim=0)
        match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)
        for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):
            low_high = (matched_vals >= low) & (matched_vals < high)
            match_labels[low_high] = l
        if self.allow_low_quality_matches:
            self.set_low_quality_matches_(match_labels, match_quality_matrix)
        return (matches, match_labels)

    def set_low_quality_matches_(self, match_labels, match_quality_matrix):
        (highest_quality_foreach_gt, _) = match_quality_matrix.max(dim=1)
        (_, pred_inds_with_highest_quality) = nonzero_tuple(match_quality_matrix == highest_quality_foreach_gt[:, None])
        match_labels[pred_inds_with_highest_quality] = 1

def subsample_labels(labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int):
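    """
    Randomly samples up to `num_samples` elements so that at most `positive_fraction` of them are
    positives (labels not in {-1, bg_label}); returns the sampled positive and negative indices.
    """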
    positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0]
    negative = nonzero_tuple(labels == bg_label)[0]
    num_pos = int(num_samples * positive_fraction)
    num_pos = min(positive.numel(), num_pos)
    num_neg = num_samples - num_pos
    num_neg = min(negative.numel(), num_neg)
    perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
    perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
    pos_idx = positive[perm1]
    neg_idx = negative[perm2]
    return (pos_idx, neg_idx)

def sample_topk_per_gt(pr_inds, gt_inds, iou, k):
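    """
    Re-draws the matched proposals of each ground-truth box from its top-k highest-IoU candidates,
    keeping at most `k` proposals per ground truth.
    """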
    if len(gt_inds) == 0:
        return (pr_inds, gt_inds)
    (gt_inds2, counts) = gt_inds.unique(return_counts=True)
    (scores, pr_inds2) = iou[gt_inds2].topk(k, dim=1)
    gt_inds2 = gt_inds2[:, None].repeat(1, k)
    pr_inds3 = torch.cat([pr[:c] for (c, pr) in zip(counts, pr_inds2)])
    gt_inds3 = torch.cat([gt[:c] for (c, gt) in zip(counts, gt_inds2)])
    return (pr_inds3, gt_inds3)

class DetaStage2Assigner(nn.Module):
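    """
    Second-stage (decoder) assigner used in place of pure Hungarian matching: queries are matched to
    ground-truth boxes by IoU against the initial reference points, foreground/background samples are
    drawn, and each ground truth keeps at most `max_k` queries.
    """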

    def __init__(self, num_queries, max_k=4):
        super().__init__()
        self.positive_fraction = 0.25
        self.bg_label = 400
        self.batch_size_per_image = num_queries
        self.proposal_matcher = DetaMatcher(thresholds=[0.6], labels=[0, 1], allow_low_quality_matches=True)
        self.k = max_k

    def _sample_proposals(self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor):
        has_gt = gt_classes.numel() > 0
        if has_gt:
            gt_classes = gt_classes[matched_idxs]
            gt_classes[matched_labels == 0] = self.bg_label
            gt_classes[matched_labels == -1] = -1
        else:
            gt_classes = torch.zeros_like(matched_idxs) + self.bg_label
        (sampled_fg_idxs, sampled_bg_idxs) = subsample_labels(gt_classes, self.batch_size_per_image, self.positive_fraction, self.bg_label)
        sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
        return (sampled_idxs, gt_classes[sampled_idxs])

    def forward(self, outputs, targets, return_cost_matrix=False):
        bs = len(targets)
        indices = []
        ious = []
        for b in range(bs):
            (iou, _) = box_iou(center_to_corners_format(targets[b]['boxes']), center_to_corners_format(outputs['init_reference'][b].detach()))
            (matched_idxs, matched_labels) = self.proposal_matcher(iou)
            (sampled_idxs, sampled_gt_classes) = self._sample_proposals(matched_idxs, matched_labels, targets[b]['class_labels'])
            pos_pr_inds = sampled_idxs[sampled_gt_classes != self.bg_label]
            pos_gt_inds = matched_idxs[pos_pr_inds]
            (pos_pr_inds, pos_gt_inds) = self.postprocess_indices(pos_pr_inds, pos_gt_inds, iou)
            indices.append((pos_pr_inds, pos_gt_inds))
            ious.append(iou)
        if return_cost_matrix:
            return (indices, ious)
        return indices

    def postprocess_indices(self, pr_inds, gt_inds, iou):
        return sample_topk_per_gt(pr_inds, gt_inds, iou, self.k)

class DetaStage1Assigner(nn.Module):
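    """
    First-stage (encoder) assigner: anchors are matched to ground-truth boxes by IoU with low/high
    thresholds, labels are randomly subsampled, and each ground truth keeps at most `max_k` anchors.
    """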

    def __init__(self, t_low=0.3, t_high=0.7, max_k=4):
        super().__init__()
        self.positive_fraction = 0.5
        self.batch_size_per_image = 256
        self.k = max_k
        self.t_low = t_low
        self.t_high = t_high
        self.anchor_matcher = DetaMatcher(thresholds=[t_low, t_high], labels=[0, -1, 1], allow_low_quality_matches=True)

    def _subsample_labels(self, label):
        (pos_idx, neg_idx) = subsample_labels(label, self.batch_size_per_image, self.positive_fraction, 0)
        label.fill_(-1)
        label.scatter_(0, pos_idx, 1)
        label.scatter_(0, neg_idx, 0)
        return label

    def forward(self, outputs, targets):
        bs = len(targets)
        indices = []
        for b in range(bs):
            anchors = outputs['anchors'][b]
            if len(targets[b]['boxes']) == 0:
                indices.append((torch.tensor([], dtype=torch.long, device=anchors.device), torch.tensor([], dtype=torch.long, device=anchors.device)))
                continue
            (iou, _) = box_iou(center_to_corners_format(targets[b]['boxes']), center_to_corners_format(anchors))
            (matched_idxs, matched_labels) = self.anchor_matcher(iou)
            matched_labels = self._subsample_labels(matched_labels)
            all_pr_inds = torch.arange(len(anchors), device=matched_labels.device)
            pos_pr_inds = all_pr_inds[matched_labels == 1]
            pos_gt_inds = matched_idxs[pos_pr_inds]
            (pos_pr_inds, pos_gt_inds) = self.postprocess_indices(pos_pr_inds, pos_gt_inds, iou)
            (pos_pr_inds, pos_gt_inds) = (pos_pr_inds.to(anchors.device), pos_gt_inds.to(anchors.device))
            indices.append((pos_pr_inds, pos_gt_inds))
        return indices

    def postprocess_indices(self, pr_inds, gt_inds, iou):
        return sample_topk_per_gt(pr_inds, gt_inds, iou, self.k)

# File: transformers-main/src/transformers/models/deprecated/efficientformer/__init__.py
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available
_import_structure = {'configuration_efficientformer': ['EfficientFormerConfig']}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_efficientformer'] = ['EfficientFormerImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_efficientformer'] = ['EfficientFormerForImageClassification', 'EfficientFormerForImageClassificationWithTeacher', 'EfficientFormerModel', 'EfficientFormerPreTrainedModel']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_efficientformer'] = ['TFEfficientFormerForImageClassification', 'TFEfficientFormerForImageClassificationWithTeacher', 'TFEfficientFormerModel', 'TFEfficientFormerPreTrainedModel']
if TYPE_CHECKING:
    from .configuration_efficientformer import EfficientFormerConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/deprecated/efficientformer/configuration_efficientformer.py
""""""
from typing import List
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

class EfficientFormerConfig(PretrainedConfig):
    model_type = 'efficientformer'

    def __init__(self, depths: List[int]=[3, 2, 6, 4], hidden_sizes: List[int]=[48, 96, 224, 448], downsamples: List[bool]=[True, True, True, True], dim: int=448, key_dim: int=32, attention_ratio: int=4, resolution: int=7, num_hidden_layers: int=5, num_attention_heads: int=8, mlp_expansion_ratio: int=4, hidden_dropout_prob: float=0.0, patch_size: int=16, num_channels: int=3, pool_size: int=3, downsample_patch_size: int=3, downsample_stride: int=2, downsample_pad: int=1, drop_path_rate: float=0.0, num_meta3d_blocks: int=1, distillation: bool=True, use_layer_scale: bool=True, layer_scale_init_value: float=1e-05, hidden_act: str='gelu', initializer_range: float=0.02, layer_norm_eps: float=1e-12, image_size: int=224, batch_norm_eps: float=1e-05, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps

# File: transformers-main/src/transformers/models/deprecated/efficientformer/convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py
""""""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling

def rename_key(old_name, num_meta4D_last_stage):
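    """Maps a parameter name of the original EfficientFormer checkpoint to its name in the Hugging Face implementation."""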
    new_name = old_name
    if 'patch_embed' in old_name:
        (_, layer, param) = old_name.split('.')
        if layer == '0':
            new_name = old_name.replace('0', 'convolution1')
        elif layer == '1':
            new_name = old_name.replace('1', 'batchnorm_before')
        elif layer == '3':
            new_name = old_name.replace('3', 'convolution2')
        else:
            new_name = old_name.replace('4', 'batchnorm_after')
    if 'network' in old_name and re.search('\\d\\.\\d', old_name):
        two_digit_num = '\\b\\d{2}\\b'
        if bool(re.search(two_digit_num, old_name)):
            match = re.search('\\d\\.\\d\\d.', old_name).group()
        else:
            match = re.search('\\d\\.\\d.', old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, '')
            trimmed_name = trimmed_name.replace('network', match[0] + '.meta4D_layers.blocks.' + match[2:-1])
            new_name = 'intermediate_stages.' + trimmed_name
        else:
            trimmed_name = old_name.replace(match, '')
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('network', 'meta4D_layers.blocks.' + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace('network', 'meta3D_layers.blocks.' + layer_index)
                if 'norm1' in old_name:
                    trimmed_name = trimmed_name.replace('norm1', 'layernorm1')
                elif 'norm2' in old_name:
                    trimmed_name = trimmed_name.replace('norm2', 'layernorm2')
                elif 'fc1' in old_name:
                    trimmed_name = trimmed_name.replace('fc1', 'linear_in')
                elif 'fc2' in old_name:
                    trimmed_name = trimmed_name.replace('fc2', 'linear_out')
            new_name = 'last_stage.' + trimmed_name
    elif 'network' in old_name and re.search('.\\d.', old_name):
        new_name = old_name.replace('network', 'intermediate_stages')
    if 'fc' in new_name:
        new_name = new_name.replace('fc', 'convolution')
    elif 'norm1' in new_name and 'layernorm1' not in new_name:
        new_name = new_name.replace('norm1', 'batchnorm_before')
    elif 'norm2' in new_name and 'layernorm2' not in new_name:
        new_name = new_name.replace('norm2', 'batchnorm_after')
    if 'proj' in new_name:
        new_name = new_name.replace('proj', 'projection')
    if 'dist_head' in new_name:
        new_name = new_name.replace('dist_head', 'distillation_classifier')
    elif 'head' in new_name:
        new_name = new_name.replace('head', 'classifier')
    elif 'patch_embed' in new_name:
        new_name = 'efficientformer.' + new_name
    elif new_name == 'norm.weight' or new_name == 'norm.bias':
        new_name = new_name.replace('norm', 'layernorm')
        new_name = 'efficientformer.' + new_name
    else:
        new_name = 'efficientformer.encoder.' + new_name
    return new_name

def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint

def prepare_img():
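    """Downloads a sample COCO image used to sanity-check the converted model."""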
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image

def convert_efficientformer_checkpoint(checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool):
    orig_state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = '_'.join(checkpoint_path.split('/')[-1].split('.')[0].split('_')[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST}
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(size={'shortest_edge': image_size}, crop_size={'height': crop_size, 'width': crop_size}, resample=pillow_resamplings['bicubic'])
    pixel_values = processor(images=image, return_tensors='pt').pixel_values
    image_transforms = Compose([Resize(image_size, interpolation=pillow_resamplings['bicubic']), CenterCrop(crop_size), ToTensor(), Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)])
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if 'l1' in model_name:
        expected_logits = torch.Tensor([-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328])
        assert torch.allclose(logits[0, :10], expected_logits, atol=0.001)
        assert logits.shape == expected_shape
    elif 'l3' in model_name:
        expected_logits = torch.Tensor([-1.315, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127])
        assert torch.allclose(logits[0, :10], expected_logits, atol=0.001)
        assert logits.shape == expected_shape
    elif 'l7' in model_name:
        expected_logits = torch.Tensor([-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878])
        assert logits.shape == expected_shape
    else:
        raise ValueError(f'Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7.')
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}')
    processor.save_pretrained(pytorch_dump_path)
    print(f'Processor successfully saved at {pytorch_dump_path}')
    if push_to_hub:
        print('Pushing model to the hub...')
        model.push_to_hub(repo_id=f'Bearnardd/{pytorch_dump_path}', commit_message='Add model', use_temp_dir=True)
        processor.push_to_hub(repo_id=f'Bearnardd/{pytorch_dump_path}', commit_message='Add image processor', use_temp_dir=True)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_model_path', default=None, type=str, required=True, help='Path to EfficientFormer pytorch checkpoint.')
    parser.add_argument('--config_file', default=None, type=str, required=True, help='The json file for EfficientFormer model config.')
    parser.add_argument('--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.')
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument('--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub')
    parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
    convert_efficientformer_checkpoint(checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub)

# File: transformers-main/src/transformers/models/deprecated/efficientformer/image_processing_efficientformer.py
""""""
from typing import Dict, List, Optional, Union
import numpy as np
from ....image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ....image_transforms import get_resize_output_image_size, resize, to_channel_dimension_format
from ....image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_batched, is_scaled_image, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments
from ....utils import TensorType, logging
logger = logging.get_logger(__name__)

class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool=True, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, crop_size: Dict[str, int]=None, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        self._valid_processor_keys = ['images', 'do_resize', 'size', 'resample', 'do_center_crop', 'crop_size', 'do_rescale', 'rescale_factor', 'do_normalize', 'image_mean', 'image_std', 'return_tensors', 'data_format', 'input_data_format']

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if 'shortest_edge' in size:
            size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False, input_data_format=input_data_format)
        elif 'height' in size and 'width' in size:
            size = (size['height'], size['width'])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: int=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)
        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
        images = [to_numpy_array(image) for image in images]
        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(images[0])
        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
        images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)

# File: transformers-main/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
""""""
import itertools
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ....activations import ACT2FN
from ....modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ....modeling_utils import PreTrainedModel
from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_efficientformer import EfficientFormerConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'EfficientFormerConfig'
_CHECKPOINT_FOR_DOC = 'snap-research/efficientformer-l1-300'
_EXPECTED_OUTPUT_SHAPE = [1, 49, 448]
_IMAGE_CLASS_CHECKPOINT = 'snap-research/efficientformer-l1-300'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'Egyptian cat'

class EfficientFormerPatchEmbeddings(nn.Module):
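    """
    Downsamples a feature map between stages with a strided convolution, going from `num_channels`
    to `embed_dim` channels, optionally followed by batch normalization.
    """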

    def __init__(self, config: EfficientFormerConfig, num_channels: int, embed_dim: int, apply_norm: bool=True):
        super().__init__()
        self.num_channels = num_channels
        self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=config.downsample_patch_size, stride=config.downsample_stride, padding=config.downsample_pad)
        self.norm = nn.BatchNorm2d(embed_dim, eps=config.batch_norm_eps) if apply_norm else nn.Identity()

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        (batch_size, num_channels, height, width) = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError('Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings

class EfficientFormerSelfAttention(nn.Module):

    def __init__(self, dim: int, key_dim: int, num_heads: int, attention_ratio: int, resolution: int):
        super().__init__()
        self.num_heads = num_heads
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.scale = key_dim ** (-0.5)
        self.total_key_dim = key_dim * num_heads
        self.expanded_key_dim = int(attention_ratio * key_dim)
        self.total_expanded_key_dim = int(self.expanded_key_dim * num_heads)
        hidden_size = self.total_expanded_key_dim + self.total_key_dim * 2
        self.qkv = nn.Linear(dim, hidden_size)
        self.projection = nn.Linear(self.total_expanded_key_dim, dim)
        points = list(itertools.product(range(resolution), range(resolution)))
        num_points = len(points)
        attention_offsets = {}
        idxs = []
        for point_1 in points:
            for point_2 in points:
                offset = (abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                idxs.append(attention_offsets[offset])
        self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
        self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(num_points, num_points))

    @torch.no_grad()
    def train(self, mode=True):
        super().train(mode)
        if mode and hasattr(self, 'ab'):
            del self.ab
        else:
            self.ab = self.attention_biases[:, self.attention_bias_idxs]

    def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> Tuple[torch.Tensor]:
        (batch_size, sequence_length, num_channels) = hidden_states.shape
        qkv = self.qkv(hidden_states)
        (query_layer, key_layer, value_layer) = qkv.reshape(batch_size, sequence_length, self.num_heads, -1).split([self.key_dim, self.key_dim, self.expanded_key_dim], dim=3)
        query_layer = query_layer.permute(0, 2, 1, 3)
        key_layer = key_layer.permute(0, 2, 1, 3)
        value_layer = value_layer.permute(0, 2, 1, 3)
        if not self.training:
            self.ab = self.ab.to(self.attention_biases.device)
        attention_probs = torch.matmul(query_layer, key_layer.transpose(-2, -1)) * self.scale + (self.attention_biases[:, self.attention_bias_idxs] if self.training else self.ab)
        attention_probs = attention_probs.softmax(dim=-1)
        context_layer = torch.matmul(attention_probs, value_layer).transpose(1, 2)
        context_layer = context_layer.reshape(batch_size, sequence_length, self.total_expanded_key_dim)
        context_layer = self.projection(context_layer)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs

class EfficientFormerConvStem(nn.Module):

    def __init__(self, config: EfficientFormerConfig, out_channels: int):
        super().__init__()
        self.convolution1 = nn.Conv2d(config.num_channels, out_channels // 2, kernel_size=3, stride=2, padding=1)
        self.batchnorm_before = nn.BatchNorm2d(out_channels // 2, eps=config.batch_norm_eps)
        self.convolution2 = nn.Conv2d(out_channels // 2, out_channels, kernel_size=3, stride=2, padding=1)
        self.batchnorm_after = nn.BatchNorm2d(out_channels, eps=config.batch_norm_eps)
        self.activation = nn.ReLU()

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        features = self.batchnorm_before(self.convolution1(pixel_values))
        features = self.activation(features)
        features = self.batchnorm_after(self.convolution2(features))
        features = self.activation(features)
        return features

class EfficientFormerPooling(nn.Module):
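    """Pooling token mixer: subtracts the input from its local average pooling."""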

    def __init__(self, pool_size: int):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        output = self.pool(hidden_states) - hidden_states
        return output

class EfficientFormerDenseMlp(nn.Module):

    def __init__(self, config: EfficientFormerConfig, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.linear_in = nn.Linear(in_features, hidden_features)
        self.activation = ACT2FN[config.hidden_act]
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.linear_out = nn.Linear(hidden_features, out_features)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.linear_in(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.linear_out(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states

class EfficientFormerConvMlp(nn.Module):

    def __init__(self, config: EfficientFormerConfig, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None, drop: float=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.convolution1 = nn.Conv2d(in_features, hidden_features, 1)
        self.activation = ACT2FN[config.hidden_act]
        self.convolution2 = nn.Conv2d(hidden_features, out_features, 1)
        self.dropout = nn.Dropout(drop)
        self.batchnorm_before = nn.BatchNorm2d(hidden_features, eps=config.batch_norm_eps)
        self.batchnorm_after = nn.BatchNorm2d(out_features, eps=config.batch_norm_eps)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        hidden_state = self.convolution1(hidden_state)
        hidden_state = self.batchnorm_before(hidden_state)
        hidden_state = self.activation(hidden_state)
        hidden_state = self.dropout(hidden_state)
        hidden_state = self.convolution2(hidden_state)
        hidden_state = self.batchnorm_after(hidden_state)
        hidden_state = self.dropout(hidden_state)
        return hidden_state

def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor:
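    """
    Drops entire residual paths per sample (stochastic depth) with probability `drop_prob` during
    training; a no-op when `drop_prob` is 0 or `training` is False.
    """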
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()
    output = input.div(keep_prob) * random_tensor
    return output

class EfficientFormerDropPath(nn.Module):

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return 'p={}'.format(self.drop_prob)

class EfficientFormerFlat(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
        hidden_states = hidden_states.flatten(2).transpose(1, 2)
        return hidden_states

class EfficientFormerMeta3D(nn.Module):

    def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float=0.0):
        super().__init__()
        self.token_mixer = EfficientFormerSelfAttention(dim=config.dim, key_dim=config.key_dim, num_heads=config.num_attention_heads, attention_ratio=config.attention_ratio, resolution=config.resolution)
        self.layernorm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.layernorm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
        self.mlp = EfficientFormerDenseMlp(config, in_features=dim, hidden_features=mlp_hidden_dim)
        self.drop_path = EfficientFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True)
            self.layer_scale_2 = nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True)

    def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> Tuple[torch.Tensor]:
        self_attention_outputs = self.token_mixer(self.layernorm1(hidden_states), output_attentions)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]
        if self.use_layer_scale:
            layer_output = hidden_states + self.drop_path(self.layer_scale_1.unsqueeze(0).unsqueeze(0) * attention_output)
            layer_output = layer_output + self.drop_path(self.layer_scale_2.unsqueeze(0).unsqueeze(0) * self.mlp(self.layernorm2(layer_output)))
        else:
            layer_output = hidden_states + self.drop_path(attention_output)
            layer_output = layer_output + self.drop_path(self.mlp(self.layernorm2(layer_output)))
        outputs = (layer_output,) + outputs
        return outputs

class EfficientFormerMeta3DLayers(nn.Module):

    def __init__(self, config: EfficientFormerConfig):
        super().__init__()
        drop_paths = [config.drop_path_rate * (block_idx + sum(config.depths[:-1])) for block_idx in range(config.num_meta3d_blocks)]
        self.blocks = nn.ModuleList([EfficientFormerMeta3D(config, config.hidden_sizes[-1], drop_path=drop_path) for drop_path in drop_paths])

    def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> Tuple[torch.Tensor]:
        all_attention_outputs = () if output_attentions else None
        for layer_module in self.blocks:
            if isinstance(hidden_states, tuple):
                hidden_states = hidden_states[0]
            hidden_states = layer_module(hidden_states, output_attentions)
            if output_attentions:
                all_attention_outputs = all_attention_outputs + (hidden_states[1],)
        if output_attentions:
            outputs = (hidden_states[0],) + all_attention_outputs
            return outputs
        return hidden_states

class EfficientFormerMeta4D(nn.Module):

    def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float=0.0):
        super().__init__()
        pool_size = config.pool_size if config.pool_size is not None else 3
        self.token_mixer = EfficientFormerPooling(pool_size=pool_size)
        mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
        self.mlp = EfficientFormerConvMlp(config, in_features=dim, hidden_features=mlp_hidden_dim, drop=config.hidden_dropout_prob)
        self.drop_path = EfficientFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True)
            self.layer_scale_2 = nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True)

    def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
        outputs = self.token_mixer(hidden_states)
        if self.use_layer_scale:
            layer_output = hidden_states + self.drop_path(self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * outputs)
            layer_output = layer_output + self.drop_path(self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * self.mlp(layer_output))
        else:
            layer_output = hidden_states + self.drop_path(outputs)
            layer_output = layer_output + self.drop_path(self.mlp(layer_output))
        return layer_output

class EfficientFormerMeta4DLayers(nn.Module):

    def __init__(self, config: EfficientFormerConfig, stage_idx: int):
        super().__init__()
        num_layers = config.depths[stage_idx] if stage_idx != -1 else config.depths[stage_idx] - config.num_meta3d_blocks
        drop_paths = [config.drop_path_rate * (block_idx + sum(config.depths[:stage_idx])) for block_idx in range(num_layers)]
        self.blocks = nn.ModuleList([EfficientFormerMeta4D(config, config.hidden_sizes[stage_idx], drop_path=drop_path) for drop_path in drop_paths])

    def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
        for layer_module in self.blocks:
            hidden_states = layer_module(hidden_states)
        return hidden_states

class EfficientFormerIntermediateStage(nn.Module):

    def __init__(self, config: EfficientFormerConfig, index: int):
        super().__init__()
        self.meta4D_layers = EfficientFormerMeta4DLayers(config, index)

    def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
        hidden_states = self.meta4D_layers(hidden_states)
        return hidden_states

class EfficientFormerLastStage(nn.Module):

    def __init__(self, config: EfficientFormerConfig):
        super().__init__()
        self.meta4D_layers = EfficientFormerMeta4DLayers(config, -1)
        self.flat = EfficientFormerFlat()
        self.meta3D_layers = EfficientFormerMeta3DLayers(config)

    def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> Tuple[torch.Tensor]:
        hidden_states = self.meta4D_layers(hidden_states)
        hidden_states = self.flat(hidden_states)
        hidden_states = self.meta3D_layers(hidden_states, output_attentions)
        return hidden_states

class EfficientFormerEncoder(nn.Module):

    def __init__(self, config: EfficientFormerConfig):
        super().__init__()
        self.config = config
        num_intermediate_stages = len(config.depths) - 1
        downsamples = [config.downsamples[i] or config.hidden_sizes[i] != config.hidden_sizes[i + 1] for i in range(num_intermediate_stages)]
        intermediate_stages = []
        for i in range(num_intermediate_stages):
            intermediate_stages.append(EfficientFormerIntermediateStage(config, i))
            if downsamples[i]:
                intermediate_stages.append(EfficientFormerPatchEmbeddings(config, config.hidden_sizes[i], config.hidden_sizes[i + 1]))
        self.intermediate_stages = nn.ModuleList(intermediate_stages)
        self.last_stage = EfficientFormerLastStage(config)

    def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool=False, output_attentions: bool=False, return_dict: bool=True) -> BaseModelOutput:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        for layer_module in self.intermediate_stages:
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        layer_output = self.last_stage(hidden_states, output_attentions=output_attentions)
        if output_attentions:
            all_self_attentions = all_self_attentions + layer_output[1:]
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (layer_output[0],)
        if not return_dict:
            return tuple((v for v in [layer_output[0], all_hidden_states, all_self_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=layer_output[0], hidden_states=all_hidden_states, attentions=all_self_attentions)

class EfficientFormerPreTrainedModel(PreTrainedModel):
    config_class = EfficientFormerConfig
    base_model_prefix = 'efficientformer'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False

    def _init_weights(self, module: nn.Module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
EFFICIENTFORMER_START_DOCSTRING = '\n    This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#nn.Module) subclass. Use it as a\n    regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.\n\n    Parameters:\n        config ([`EfficientFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
EFFICIENTFORMER_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`ViTImageProcessor`]. See\n            [`ViTImageProcessor.preprocess`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'

@add_start_docstrings('The bare EfficientFormer Model transformer outputting raw hidden-states without any specific head on top.', EFFICIENTFORMER_START_DOCSTRING)
class EfficientFormerModel(EfficientFormerPreTrainedModel):
    _no_split_modules = ['EfficientFormerMeta4D']

    def __init__(self, config: EfficientFormerConfig):
        super().__init__(config)
        self.config = config
        self.patch_embed = EfficientFormerConvStem(config, config.hidden_sizes[0])
        self.encoder = EfficientFormerEncoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
        self.post_init()

    @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        embedding_output = self.patch_embed(pixel_values)
        encoder_outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        if not return_dict:
            head_outputs = (sequence_output,)
            return head_outputs + encoder_outputs[1:]
        return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
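
# A minimal, hypothetical usage sketch for the bare model above. The 224x224 input size and the
# default `EfficientFormerConfig` values (3 channels, final hidden size 448, 7x7 last-stage grid)
# are assumptions; swap in a pretrained checkpoint via `from_pretrained` for meaningful features.
def _example_bare_model_forward():
    import torch

    config = EfficientFormerConfig()
    model = EfficientFormerModel(config).eval()
    pixel_values = torch.rand(1, config.num_channels, 224, 224)
    with torch.no_grad():
        outputs = model(pixel_values)
    # With the defaults, the last stage is a 7x7 grid -> 49 tokens of width 448: shape (1, 49, 448).
    return outputs.last_hidden_state.shape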

@add_start_docstrings('\n    EfficientFormer Model transformer with an image classification head on top (a linear layer on top of the final\n    hidden state of the [CLS] token) e.g. for ImageNet.\n    ', EFFICIENTFORMER_START_DOCSTRING)
class EfficientFormerForImageClassification(EfficientFormerPreTrainedModel):

    def __init__(self, config: EfficientFormerConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.efficientformer = EfficientFormerModel(config)
        self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.post_init()

    @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.efficientformer(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output.mean(-2))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
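
# A hypothetical sketch of how `config.problem_type` steers the loss in the head above: integer
# labels select cross-entropy, float multi-hot labels select BCE-with-logits, and a single label
# would select MSE regression. The label values and `num_labels=3` are illustrative assumptions.
def _example_classification_losses():
    import torch

    config = EfficientFormerConfig(num_labels=3)
    model = EfficientFormerForImageClassification(config).eval()
    pixel_values = torch.rand(2, config.num_channels, 224, 224)
    single_label_loss = model(pixel_values, labels=torch.tensor([0, 2])).loss
    model.config.problem_type = None  # reset so the dtype-based heuristic runs again
    multi_hot = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
    multi_label_loss = model(pixel_values, labels=multi_hot).loss
    return single_label_loss, multi_label_loss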

@dataclass
class EfficientFormerForImageClassificationWithTeacherOutput(ModelOutput):
    logits: torch.FloatTensor = None
    cls_logits: torch.FloatTensor = None
    distillation_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

@add_start_docstrings('\n    EfficientFormer Model transformer with image classification heads on top (a linear layer on top of the final hidden\n    state of the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for\n    ImageNet.\n\n    This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet\n    supported.\n    ', EFFICIENTFORMER_START_DOCSTRING)
class EfficientFormerForImageClassificationWithTeacher(EfficientFormerPreTrainedModel):

    def __init__(self, config: EfficientFormerConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.efficientformer = EfficientFormerModel(config)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.distillation_classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        self.post_init()

    @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=EfficientFormerForImageClassificationWithTeacherOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, EfficientFormerForImageClassificationWithTeacherOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.efficientformer(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        cls_logits = self.classifier(sequence_output.mean(-2))
        distillation_logits = self.distillation_classifier(sequence_output.mean(-2))
        logits = (cls_logits + distillation_logits) / 2
        if not return_dict:
            output = (logits, cls_logits, distillation_logits) + outputs[1:]
            return output
        return EfficientFormerForImageClassificationWithTeacherOutput(logits=logits, cls_logits=cls_logits, distillation_logits=distillation_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
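
# A hypothetical sketch of the teacher-style head above: the returned `logits` are the average of
# the classification and distillation logits. `num_labels=10` is an illustrative assumption.
def _example_teacher_logits():
    import torch

    config = EfficientFormerConfig(num_labels=10)
    model = EfficientFormerForImageClassificationWithTeacher(config).eval()
    with torch.no_grad():
        out = model(torch.rand(1, config.num_channels, 224, 224))
    assert torch.allclose(out.logits, (out.cls_logits + out.distillation_logits) / 2)
    return out.logits.shape  # (1, 10)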

# File: transformers-main/src/transformers/models/deprecated/efficientformer/modeling_tf_efficientformer.py
""""""
import itertools
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import tensorflow as tf
from ....activations_tf import ACT2FN
from ....modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFImageClassifierOutput
from ....modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs
from ....tf_utils import shape_list, stable_softmax
from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_efficientformer import EfficientFormerConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'EfficientFormerConfig'
_CHECKPOINT_FOR_DOC = 'snap-research/efficientformer-l1-300'
_EXPECTED_OUTPUT_SHAPE = [1, 49, 448]
_IMAGE_CLASS_CHECKPOINT = 'snap-research/efficientformer-l1-300'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'LABEL_281'

class TFEfficientFormerPatchEmbeddings(keras.layers.Layer):

    def __init__(self, config: EfficientFormerConfig, num_channels: int, embed_dim: int, apply_norm: bool=True, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.padding = keras.layers.ZeroPadding2D(padding=config.downsample_pad)
        self.projection = keras.layers.Conv2D(filters=embed_dim, kernel_size=config.downsample_patch_size, strides=config.downsample_stride, padding='valid', name='projection')
        self.norm = keras.layers.BatchNormalization(axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name='norm') if apply_norm else tf.identity
        self.embed_dim = embed_dim

    def call(self, pixel_values: tf.Tensor, training: bool=False) -> tf.Tensor:
        tf.debugging.assert_shapes([(pixel_values, (..., None, None, self.num_channels))], message='Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        embeddings = self.projection(self.padding(pixel_values))
        embeddings = self.norm(embeddings, training=training)
        return embeddings

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'projection', None) is not None:
            with tf.name_scope(self.projection.name):
                self.projection.build([None, None, None, self.num_channels])
        if getattr(self, 'norm', None) is not None:
            if hasattr(self.norm, 'name'):
                with tf.name_scope(self.norm.name):
                    self.norm.build([None, None, None, self.embed_dim])

class TFEfficientFormerSelfAttention(keras.layers.Layer):

    def __init__(self, dim: int, key_dim: int, num_heads: int, attention_ratio: int, resolution: int, config: EfficientFormerConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_heads = num_heads
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.scale = key_dim ** (-0.5)
        self.total_key_dim = key_dim * num_heads
        self.expanded_key_dim = int(attention_ratio * key_dim)
        self.total_expanded_key_dim = int(self.expanded_key_dim * num_heads)
        hidden_size = self.total_expanded_key_dim + self.total_key_dim * 2
        self.qkv = keras.layers.Dense(units=hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='qkv')
        self.projection = keras.layers.Dense(units=dim, kernel_initializer=get_initializer(config.initializer_range), name='projection')
        self.resolution = resolution
        self.dim = dim

    def build(self, input_shape: tf.TensorShape) -> None:
        points = list(itertools.product(range(self.resolution), range(self.resolution)))
        num_points = len(points)
        attention_offsets = {}
        idxs = []
        for point_1 in points:
            for point_2 in points:
                offset = (abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                idxs.append(attention_offsets[offset])
        self.attention_biases = self.add_weight(shape=(self.num_heads, len(attention_offsets)), initializer=keras.initializers.zeros(), trainable=True, name='attention_biases')
        self.attention_bias_idxs = self.add_weight(shape=(num_points, num_points), trainable=False, dtype=tf.int32, name='attention_bias_idxs')
        self.attention_bias_idxs.assign(tf.reshape(tf.cast(idxs, dtype=tf.int32), (num_points, num_points)))
        if self.built:
            return
        self.built = True
        if getattr(self, 'qkv', None) is not None:
            with tf.name_scope(self.qkv.name):
                self.qkv.build([None, None, self.dim])
        if getattr(self, 'projection', None) is not None:
            with tf.name_scope(self.projection.name):
                self.projection.build([None, None, self.total_expanded_key_dim])

    def call(self, hidden_states: tf.Tensor, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:
        (batch_size, sequence_length, *_) = shape_list(hidden_states)
        qkv = self.qkv(inputs=hidden_states)
        (query_layer, key_layer, value_layer) = tf.split(tf.reshape(tensor=qkv, shape=(batch_size, sequence_length, self.num_heads, -1)), num_or_size_splits=[self.key_dim, self.key_dim, self.expanded_key_dim], axis=3)
        query_layer = tf.transpose(query_layer, perm=[0, 2, 1, 3])
        key_layer = tf.transpose(key_layer, perm=[0, 2, 1, 3])
        value_layer = tf.transpose(value_layer, perm=[0, 2, 1, 3])
        attention_probs = tf.matmul(query_layer, tf.transpose(key_layer, perm=[0, 1, 3, 2]))
        scale = tf.cast(self.scale, dtype=attention_probs.dtype)
        attention_probs = tf.multiply(attention_probs, scale)
        attention_biases = tf.gather(params=self.attention_biases, indices=self.attention_bias_idxs, axis=1)
        attention_probs = attention_probs + attention_biases
        attention_probs = stable_softmax(logits=attention_probs, axis=-1)
        context_layer = tf.matmul(attention_probs, value_layer)
        context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
        context_layer = tf.reshape(tensor=context_layer, shape=(batch_size, sequence_length, self.total_expanded_key_dim))
        context_layer = self.projection(context_layer)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
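
# A standalone, hypothetical sketch of the relative-position bias indexing built in
# `TFEfficientFormerSelfAttention.build` above: every pair of grid points shares a learned bias
# determined only by the absolute offset between them, so a resolution x resolution grid needs
# only `len(attention_offsets)` distinct biases rather than one per point pair.
def _example_attention_bias_indices(resolution: int = 3):
    import itertools

    points = list(itertools.product(range(resolution), range(resolution)))
    attention_offsets, idxs = {}, []
    for point_1 in points:
        for point_2 in points:
            offset = (abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
            if offset not in attention_offsets:
                attention_offsets[offset] = len(attention_offsets)
            idxs.append(attention_offsets[offset])
    return len(attention_offsets), len(idxs)  # 9 unique offsets shared by 81 point pairs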

class TFEfficientFormerConvStem(keras.layers.Layer):

    def __init__(self, config: EfficientFormerConfig, out_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.padding = keras.layers.ZeroPadding2D(padding=1)
        self.convolution1 = keras.layers.Conv2D(filters=out_channels // 2, kernel_size=3, strides=2, padding='valid', name='convolution1')
        self.batchnorm_before = keras.layers.BatchNormalization(axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name='batchnorm_before')
        self.convolution2 = keras.layers.Conv2D(filters=out_channels, kernel_size=3, strides=2, padding='valid', name='convolution2')
        self.batchnorm_after = keras.layers.BatchNormalization(axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name='batchnorm_after')
        self.activation = keras.layers.Activation(activation=keras.activations.relu, name='activation')
        self.out_channels = out_channels
        self.config = config

    def call(self, pixel_values: tf.Tensor, training: bool=False) -> tf.Tensor:
        features = self.batchnorm_before(self.convolution1(self.padding(pixel_values)), training=training)
        features = self.activation(features)
        features = self.batchnorm_after(self.convolution2(self.padding(features)), training=training)
        features = self.activation(features)
        return features

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convolution1', None) is not None:
            with tf.name_scope(self.convolution1.name):
                self.convolution1.build([None, None, None, self.config.num_channels])
        if getattr(self, 'batchnorm_before', None) is not None:
            with tf.name_scope(self.batchnorm_before.name):
                self.batchnorm_before.build([None, None, None, self.out_channels // 2])
        if getattr(self, 'convolution2', None) is not None:
            with tf.name_scope(self.convolution2.name):
                self.convolution2.build([None, None, None, self.out_channels // 2])
        if getattr(self, 'batchnorm_after', None) is not None:
            with tf.name_scope(self.batchnorm_after.name):
                self.batchnorm_after.build([None, None, None, self.out_channels])
        if getattr(self, 'activation', None) is not None:
            with tf.name_scope(self.activation.name):
                self.activation.build(None)

class TFEfficientFormerPooling(keras.layers.Layer):

    def __init__(self, pool_size: int, **kwargs):
        super().__init__(**kwargs)
        self.pool = keras.layers.AveragePooling2D(pool_size=pool_size, strides=1, padding='same')

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        output = self.pool(hidden_states)
        output = output - hidden_states
        return output

class TFEfficientFormerDenseMlp(keras.layers.Layer):

    def __init__(self, config: EfficientFormerConfig, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None, **kwargs):
        super().__init__(**kwargs)
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.linear_in = keras.layers.Dense(units=hidden_features, kernel_initializer=get_initializer(config.initializer_range), name='linear_in')
        self.activation = ACT2FN[config.hidden_act]
        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
        self.linear_out = keras.layers.Dense(units=out_features, kernel_initializer=get_initializer(config.initializer_range), name='linear_out')
        self.hidden_features = hidden_features
        self.in_features = in_features

    def call(self, hidden_states: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_states = self.linear_in(inputs=hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        hidden_states = self.linear_out(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'linear_in', None) is not None:
            with tf.name_scope(self.linear_in.name):
                self.linear_in.build([None, None, self.in_features])
        if getattr(self, 'linear_out', None) is not None:
            with tf.name_scope(self.linear_out.name):
                self.linear_out.build([None, None, self.hidden_features])

class TFEfficientFormerConvMlp(keras.layers.Layer):

    def __init__(self, config: EfficientFormerConfig, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None, drop: float=0.0, **kwargs):
        super().__init__(**kwargs)
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.convolution1 = keras.layers.Conv2D(filters=hidden_features, kernel_size=1, name='convolution1', padding='valid')
        self.activation = ACT2FN[config.hidden_act]
        self.convolution2 = keras.layers.Conv2D(filters=out_features, kernel_size=1, name='convolution2', padding='valid')
        self.dropout = keras.layers.Dropout(rate=drop)
        self.batchnorm_before = keras.layers.BatchNormalization(axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name='batchnorm_before')
        self.batchnorm_after = keras.layers.BatchNormalization(axis=-1, epsilon=config.batch_norm_eps, momentum=0.9, name='batchnorm_after')
        self.hidden_features = hidden_features
        self.in_features = in_features
        self.out_features = out_features

    def call(self, hidden_state: tf.Tensor, training: bool=False) -> tf.Tensor:
        hidden_state = self.convolution1(hidden_state)
        hidden_state = self.batchnorm_before(hidden_state, training=training)
        hidden_state = self.activation(hidden_state)
        hidden_state = self.dropout(hidden_state, training=training)
        hidden_state = self.convolution2(hidden_state)
        hidden_state = self.batchnorm_after(hidden_state, training=training)
        hidden_state = self.dropout(hidden_state, training=training)
        return hidden_state

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'convolution1', None) is not None:
            with tf.name_scope(self.convolution1.name):
                self.convolution1.build([None, None, None, self.in_features])
        if getattr(self, 'convolution2', None) is not None:
            with tf.name_scope(self.convolution2.name):
                self.convolution2.build([None, None, None, self.hidden_features])
        if getattr(self, 'batchnorm_before', None) is not None:
            with tf.name_scope(self.batchnorm_before.name):
                self.batchnorm_before.build([None, None, None, self.hidden_features])
        if getattr(self, 'batchnorm_after', None) is not None:
            with tf.name_scope(self.batchnorm_after.name):
                self.batchnorm_after.build([None, None, None, self.out_features])

class TFEfficientFormerDropPath(keras.layers.Layer):

    def __init__(self, drop_path: float, **kwargs):
        super().__init__(**kwargs)
        self.drop_path = drop_path

    def call(self, x: tf.Tensor, training=None):
        if training:
            keep_prob = 1 - self.drop_path
            shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
            random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
            random_tensor = tf.floor(random_tensor)
            return x / keep_prob * random_tensor
        return x
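
# A hypothetical sketch of the stochastic-depth behaviour implemented above: at inference the
# layer is the identity, while in training each sample is dropped with probability `drop_path`
# and the survivors are rescaled by 1 / keep_prob so the expected value is preserved.
def _example_drop_path_behaviour():
    import tensorflow as tf

    layer = TFEfficientFormerDropPath(drop_path=0.5)
    x = tf.ones((4, 49, 448))
    assert bool(tf.reduce_all(layer(x, training=False) == x))  # identity at inference
    y = layer(x, training=True)  # each sample is either zeroed out or scaled to 2.0
    return tf.unique(tf.reshape(y, [-1]))[0]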

class TFEfficientFormerFlat(keras.layers.Layer):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def call(self, hidden_states: tf.Tensor) -> Tuple[tf.Tensor]:
        (batch_size, _, _, in_channels) = shape_list(hidden_states)
        hidden_states = tf.reshape(hidden_states, shape=[batch_size, -1, in_channels])
        return hidden_states

class TFEfficientFormerMeta3D(keras.layers.Layer):

    def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float=0.0, **kwargs):
        super().__init__(**kwargs)
        self.token_mixer = TFEfficientFormerSelfAttention(dim=config.dim, key_dim=config.key_dim, num_heads=config.num_attention_heads, attention_ratio=config.attention_ratio, resolution=config.resolution, name='token_mixer', config=config)
        self.dim = dim
        self.config = config
        self.layernorm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm1')
        self.layernorm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm2')
        mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
        self.mlp = TFEfficientFormerDenseMlp(config, in_features=dim, hidden_features=mlp_hidden_dim, name='mlp')
        self.drop_path = TFEfficientFormerDropPath(drop_path) if drop_path > 0.0 else keras.layers.Activation('linear', name='drop_path')

    def build(self, input_shape=None):
        self.layer_scale_1 = None
        self.layer_scale_2 = None
        if self.config.use_layer_scale:
            self.layer_scale_1 = self.add_weight(shape=(self.dim,), initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value), trainable=True, name='layer_scale_1')
            self.layer_scale_2 = self.add_weight(shape=(self.dim,), initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value), trainable=True, name='layer_scale_2')
        if self.built:
            return
        self.built = True
        if getattr(self, 'token_mixer', None) is not None:
            with tf.name_scope(self.token_mixer.name):
                self.token_mixer.build(None)
        if getattr(self, 'layernorm1', None) is not None:
            with tf.name_scope(self.layernorm1.name):
                self.layernorm1.build([None, None, self.dim])
        if getattr(self, 'layernorm2', None) is not None:
            with tf.name_scope(self.layernorm2.name):
                self.layernorm2.build([None, None, self.dim])
        if getattr(self, 'mlp', None) is not None:
            with tf.name_scope(self.mlp.name):
                self.mlp.build(None)
        if getattr(self, 'drop_path', None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)

    def call(self, hidden_states: tf.Tensor, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:
        self_attention_outputs = self.token_mixer(hidden_states=self.layernorm1(hidden_states, training=training), output_attentions=output_attentions, training=training)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]
        if self.config.use_layer_scale:
            layer_output = hidden_states + self.drop_path(tf.expand_dims(tf.expand_dims(self.layer_scale_1, 0), 0) * attention_output, training=training)
            layer_output = layer_output + self.drop_path(tf.expand_dims(tf.expand_dims(self.layer_scale_2, 0), 0) * self.mlp(hidden_states=self.layernorm2(inputs=layer_output, training=training), training=training), training=training)
        else:
            layer_output = hidden_states + self.drop_path(attention_output, training=training)
            layer_output = layer_output + self.drop_path(self.mlp(hidden_states=self.layernorm2(inputs=layer_output, training=training), training=training), training=training)
        outputs = (layer_output,) + outputs
        return outputs

class TFEfficientFormerMeta3DLayers(keras.layers.Layer):

    def __init__(self, config: EfficientFormerConfig, **kwargs):
        super().__init__(**kwargs)
        drop_paths = [config.drop_path_rate * (block_idx + sum(config.depths[:-1])) for block_idx in range(config.num_meta3d_blocks)]
        self.blocks = [TFEfficientFormerMeta3D(config, config.hidden_sizes[-1], drop_path=drop_path, name=f'blocks.{i}') for (i, drop_path) in enumerate(drop_paths)]

    def call(self, hidden_states: tf.Tensor, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:
        all_attention_outputs = () if output_attentions else None
        for (i, layer_module) in enumerate(self.blocks):
            if isinstance(hidden_states, tuple):
                hidden_states = hidden_states[0]
            hidden_states = layer_module(hidden_states=hidden_states, output_attentions=output_attentions, training=training)
            if output_attentions:
                all_attention_outputs = all_attention_outputs + (hidden_states[1],)
        if output_attentions:
            outputs = (hidden_states[0],) + all_attention_outputs
            return outputs
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'blocks', None) is not None:
            for layer in self.blocks:
                with tf.name_scope(layer.name):
                    layer.build(None)

class TFEfficientFormerMeta4D(keras.layers.Layer):

    def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float=0.0, **kwargs):
        super().__init__(**kwargs)
        pool_size = config.pool_size if config.pool_size is not None else 3
        self.token_mixer = TFEfficientFormerPooling(pool_size=pool_size, name='token_mixer')
        self.dim = dim
        mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
        self.mlp = TFEfficientFormerConvMlp(config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=config.hidden_dropout_prob, name='mlp')
        self.drop_path = TFEfficientFormerDropPath(drop_path, name='drop_path') if drop_path > 0.0 else keras.layers.Activation('linear', name='drop_path')
        self.config = config

    def build(self, input_shape=None):
        self.layer_scale_1 = None
        self.layer_scale_2 = None
        if self.config.use_layer_scale:
            self.layer_scale_1 = self.add_weight(shape=self.dim, initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value), trainable=True, name='layer_scale_1')
            self.layer_scale_2 = self.add_weight(shape=self.dim, initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value), trainable=True, name='layer_scale_2')
        if self.built:
            return
        self.built = True
        if getattr(self, 'token_mixer', None) is not None:
            with tf.name_scope(self.token_mixer.name):
                self.token_mixer.build(None)
        if getattr(self, 'mlp', None) is not None:
            with tf.name_scope(self.mlp.name):
                self.mlp.build(None)
        if getattr(self, 'drop_path', None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)

    def call(self, hidden_states: tf.Tensor, training: bool=False) -> Tuple[tf.Tensor]:
        outputs = self.token_mixer(hidden_states)
        if self.config.use_layer_scale:
            layer_output = hidden_states + self.drop_path(tf.expand_dims(tf.expand_dims(self.layer_scale_1, 0), 0) * outputs, training=training)
            layer_output = layer_output + self.drop_path(tf.expand_dims(tf.expand_dims(self.layer_scale_2, 0), 0) * self.mlp(hidden_state=layer_output, training=training), training=training)
        else:
            layer_output = hidden_states + self.drop_path(outputs, training=training)
            layer_output = layer_output + self.drop_path(self.mlp(hidden_state=layer_output, training=training), training=training)
        return layer_output

class TFEfficientFormerMeta4DLayers(keras.layers.Layer):

    def __init__(self, config: EfficientFormerConfig, stage_idx: int, **kwargs):
        super().__init__(**kwargs)
        num_layers = config.depths[stage_idx] if stage_idx != -1 else config.depths[stage_idx] - config.num_meta3d_blocks
        drop_paths = [config.drop_path_rate * (block_idx + sum(config.depths[:stage_idx])) for block_idx in range(num_layers)]
        self.blocks = [TFEfficientFormerMeta4D(config=config, dim=config.hidden_sizes[stage_idx], drop_path=drop_paths[i], name=f'blocks.{i}') for i in range(len(drop_paths))]

    def call(self, hidden_states: tf.Tensor, training: bool=False) -> Tuple[tf.Tensor]:
        for layer_module in self.blocks:
            hidden_states = layer_module(hidden_states=hidden_states, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'blocks', None) is not None:
            for layer in self.blocks:
                with tf.name_scope(layer.name):
                    layer.build(None)

class TFEfficientFormerIntermediateStage(keras.layers.Layer):

    def __init__(self, config: EfficientFormerConfig, index: int, **kwargs):
        super().__init__(**kwargs)
        self.meta4D_layers = TFEfficientFormerMeta4DLayers(config=config, stage_idx=index, name='meta4D_layers')

    def call(self, hidden_states: tf.Tensor, training: bool=False) -> Tuple[tf.Tensor]:
        hidden_states = self.meta4D_layers(hidden_states=hidden_states, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'meta4D_layers', None) is not None:
            with tf.name_scope(self.meta4D_layers.name):
                self.meta4D_layers.build(None)

class TFEfficientFormerLastStage(keras.layers.Layer):

    def __init__(self, config: EfficientFormerConfig, **kwargs):
        super().__init__(**kwargs)
        self.meta4D_layers = TFEfficientFormerMeta4DLayers(config=config, stage_idx=-1, name='meta4D_layers')
        self.flat = TFEfficientFormerFlat(name='flat')
        self.meta3D_layers = TFEfficientFormerMeta3DLayers(config, name='meta3D_layers')

    def call(self, hidden_states: tf.Tensor, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:
        hidden_states = self.meta4D_layers(hidden_states=hidden_states, training=training)
        hidden_states = self.flat(hidden_states=hidden_states)
        hidden_states = self.meta3D_layers(hidden_states=hidden_states, output_attentions=output_attentions, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'meta4D_layers', None) is not None:
            with tf.name_scope(self.meta4D_layers.name):
                self.meta4D_layers.build(None)
        if getattr(self, 'flat', None) is not None:
            with tf.name_scope(self.flat.name):
                self.flat.build(None)
        if getattr(self, 'meta3D_layers', None) is not None:
            with tf.name_scope(self.meta3D_layers.name):
                self.meta3D_layers.build(None)

class TFEfficientFormerEncoder(keras.layers.Layer):

    def __init__(self, config: EfficientFormerConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        num_intermediate_stages = len(config.depths) - 1
        downsamples = [config.downsamples[i] or config.hidden_sizes[i] != config.hidden_sizes[i + 1] for i in range(num_intermediate_stages)]
        intermediate_stages = []
        layer_count = -1
        for i in range(num_intermediate_stages):
            layer_count += 1
            intermediate_stages.append(TFEfficientFormerIntermediateStage(config, i, name=f'intermediate_stages.{layer_count}'))
            if downsamples[i]:
                layer_count += 1
                intermediate_stages.append(TFEfficientFormerPatchEmbeddings(config, config.hidden_sizes[i], config.hidden_sizes[i + 1], name=f'intermediate_stages.{layer_count}'))
        self.intermediate_stages = intermediate_stages
        self.last_stage = TFEfficientFormerLastStage(config, name='last_stage')

    def call(self, hidden_states: tf.Tensor, output_hidden_states: bool, output_attentions: bool, return_dict: bool, training: bool=False) -> TFBaseModelOutput:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        for layer_module in self.intermediate_stages:
            hidden_states = layer_module(hidden_states, training=training)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        layer_output = self.last_stage(hidden_states, output_attentions=output_attentions, training=training)
        if output_attentions:
            all_self_attentions = all_self_attentions + layer_output[1:]
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (layer_output[0],)
        if not return_dict:
            return tuple((v for v in [layer_output[0], all_hidden_states, all_self_attentions] if v is not None))
        return TFBaseModelOutput(last_hidden_state=layer_output[0], hidden_states=all_hidden_states, attentions=all_self_attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'last_stage', None) is not None:
            with tf.name_scope(self.last_stage.name):
                self.last_stage.build(None)
        for layer in self.intermediate_stages:
            with tf.name_scope(layer.name):
                layer.build(None)

@keras_serializable
class TFEfficientFormerMainLayer(keras.layers.Layer):
    config_class = EfficientFormerConfig

    def __init__(self, config: EfficientFormerConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.config = config
        self.patch_embed = TFEfficientFormerConvStem(config, config.hidden_sizes[0], name='patch_embed')
        self.encoder = TFEfficientFormerEncoder(config, name='encoder')
        self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm')

    @unpack_inputs
    def call(self, pixel_values: Optional[tf.Tensor]=None, output_attentions: Optional[tf.Tensor]=None, output_hidden_states: Optional[tf.Tensor]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor, ...]]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        embedding_output = self.patch_embed(pixel_values, training=training)
        encoder_outputs = self.encoder(hidden_states=embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output, training=training)
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1][:-1]]) + (encoder_outputs[1][-1],)
        if not return_dict:
            head_outputs = (sequence_output,)
            return head_outputs + encoder_outputs[1:]
        return TFBaseModelOutput(last_hidden_state=sequence_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'patch_embed', None) is not None:
            with tf.name_scope(self.patch_embed.name):
                self.patch_embed.build(None)
        if getattr(self, 'encoder', None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, 'layernorm', None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, self.config.hidden_sizes[-1]])

class TFEfficientFormerPreTrainedModel(TFPreTrainedModel):
    config_class = EfficientFormerConfig
    base_model_prefix = 'efficientformer'
    main_input_name = 'pixel_values'
EFFICIENTFORMER_START_DOCSTRING = '\n    This model is a TensorFlow\n    [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer). Use it as a regular\n    TensorFlow Module and refer to the TensorFlow documentation for all matter related to general usage and behavior.\n\n\n    Parameters:\n        config ([`EfficientFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
EFFICIENTFORMER_INPUTS_DOCSTRING = '\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`EfficientFormerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'

@add_start_docstrings('The bare EfficientFormer Model transformer outputting raw hidden-states without any specific head on top.', EFFICIENTFORMER_START_DOCSTRING)
class TFEfficientFormerModel(TFEfficientFormerPreTrainedModel):

    def __init__(self, config: EfficientFormerConfig, **kwargs) -> None:
        super().__init__(config, **kwargs)
        self.efficientformer = TFEfficientFormerMainLayer(config, name='efficientformer')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def call(self, pixel_values: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[Tuple, TFBaseModelOutput]:
        outputs = self.efficientformer(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'efficientformer', None) is not None:
            with tf.name_scope(self.efficientformer.name):
                self.efficientformer.build(None)
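
# A minimal, hypothetical usage sketch for the TF bare model above. Inputs follow the documented
# NCHW layout (the main layer transposes to NHWC internally); the 224x224 size and the default
# config values are assumptions.
def _example_tf_bare_model_forward():
    import tensorflow as tf

    config = EfficientFormerConfig()
    model = TFEfficientFormerModel(config)
    pixel_values = tf.random.uniform((1, config.num_channels, 224, 224))
    outputs = model(pixel_values, training=False)
    return outputs.last_hidden_state.shape  # (1, 49, 448) with the default config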

@add_start_docstrings('\n    EfficientFormer Model transformer with an image classification head on top of pooled last hidden state, e.g. for\n    ImageNet.\n    ', EFFICIENTFORMER_START_DOCSTRING)
class TFEfficientFormerForImageClassification(TFEfficientFormerPreTrainedModel, TFSequenceClassificationLoss):

    def __init__(self, config: EfficientFormerConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.efficientformer = TFEfficientFormerMainLayer(config, name='efficientformer')
        self.classifier = keras.layers.Dense(config.num_labels, name='classifier') if config.num_labels > 0 else keras.layers.Activation('linear', name='classifier')
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def call(self, pixel_values: Optional[tf.Tensor]=None, labels: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[tf.Tensor, TFImageClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.efficientformer(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        logits = self.classifier(tf.reduce_mean(sequence_output, axis=-2))
        loss = None if labels is None else self.hf_compute_loss(labels, logits)
        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return TFImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'efficientformer', None) is not None:
            with tf.name_scope(self.efficientformer.name):
                self.efficientformer.build(None)
        if getattr(self, 'classifier', None) is not None:
            if hasattr(self.classifier, 'name'):
                with tf.name_scope(self.classifier.name):
                    self.classifier.build([None, None, self.config.hidden_sizes[-1]])

@dataclass
class TFEfficientFormerForImageClassificationWithTeacherOutput(ModelOutput):
    logits: tf.Tensor = None
    cls_logits: tf.Tensor = None
    distillation_logits: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None

@add_start_docstrings('\n    EfficientFormer Model transformer with image classification heads on top (a linear layer on top of the final hidden\n    state and a linear layer on top of the final hidden state of the distillation token) e.g. for ImageNet.\n\n    .. warning::\n            This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet\n            supported.\n    ', EFFICIENTFORMER_START_DOCSTRING)
class TFEfficientFormerForImageClassificationWithTeacher(TFEfficientFormerPreTrainedModel):

    def __init__(self, config: EfficientFormerConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.efficientformer = TFEfficientFormerMainLayer(config, name='efficientformer')
        self.classifier = keras.layers.Dense(config.num_labels, name='classifier') if config.num_labels > 0 else keras.layers.Activation('linear', name='classifier')
        self.distillation_classifier = keras.layers.Dense(config.num_labels, name='distillation_classifier') if config.num_labels > 0 else keras.layers.Activation('linear', name='distillation_classifier')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFEfficientFormerForImageClassificationWithTeacherOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def call(self, pixel_values: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[tuple, TFEfficientFormerForImageClassificationWithTeacherOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if training:
            raise Exception('This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet supported.')
        outputs = self.efficientformer(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        sequence_output = outputs[0]
        cls_logits = self.classifier(tf.reduce_mean(sequence_output, axis=-2))
        distillation_logits = self.distillation_classifier(tf.reduce_mean(sequence_output, axis=-2))
        logits = (cls_logits + distillation_logits) / 2
        if not return_dict:
            output = (logits, cls_logits, distillation_logits) + outputs[1:]
            return output
        return TFEfficientFormerForImageClassificationWithTeacherOutput(logits=logits, cls_logits=cls_logits, distillation_logits=distillation_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, 'efficientformer', None) is not None:
            with tf.name_scope(self.efficientformer.name):
                self.efficientformer.build(None)
        if getattr(self, 'classifier', None) is not None:
            if hasattr(self.classifier, 'name'):
                with tf.name_scope(self.classifier.name):
                    self.classifier.build([None, None, self.config.hidden_sizes[-1]])
        if getattr(self, 'distillation_classifier', None) is not None:
            if hasattr(self.distillation_classifier, 'name'):
                with tf.name_scope(self.distillation_classifier.name):
                    self.distillation_classifier.build([None, None, self.config.hidden_sizes[-1]])

# File: transformers-main/src/transformers/models/deprecated/ernie_m/__init__.py
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
_import_structure = {'configuration_ernie_m': ['ErnieMConfig']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_ernie_m'] = ['ErnieMTokenizer']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_ernie_m'] = ['ErnieMForMultipleChoice', 'ErnieMForQuestionAnswering', 'ErnieMForSequenceClassification', 'ErnieMForTokenClassification', 'ErnieMModel', 'ErnieMPreTrainedModel', 'ErnieMForInformationExtraction']
if TYPE_CHECKING:
    from .configuration_ernie_m import ErnieMConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_ernie_m import ErnieMTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie_m import ErnieMForInformationExtraction, ErnieMForMultipleChoice, ErnieMForQuestionAnswering, ErnieMForSequenceClassification, ErnieMForTokenClassification, ErnieMModel, ErnieMPreTrainedModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/deprecated/ernie_m/configuration_ernie_m.py
""""""
from __future__ import annotations
from typing import Dict
from ....configuration_utils import PretrainedConfig

class ErnieMConfig(PretrainedConfig):
    model_type = 'ernie_m'
    attribute_map: Dict[str, str] = {'dropout': 'classifier_dropout', 'num_classes': 'num_labels'}

    def __init__(self, vocab_size: int=250002, hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.1, attention_probs_dropout_prob: float=0.1, max_position_embeddings: int=514, initializer_range: float=0.02, pad_token_id: int=1, layer_norm_eps: float=1e-05, classifier_dropout=None, act_dropout=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.act_dropout = act_dropout
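
# A hypothetical sketch of the `attribute_map` declared above: `dropout` and `num_classes` are
# aliases that read and write `classifier_dropout` and `num_labels`. The values used here are
# illustrative assumptions, not recommended settings.
def _example_ernie_m_config_aliases():
    config = ErnieMConfig(hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
    config.num_classes = 5  # routed to `num_labels` through `attribute_map`
    config.dropout = 0.2  # routed to `classifier_dropout`
    return config.num_labels, config.classifier_dropout  # (5, 0.2)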

# File: transformers-main/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
""""""
import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn, tensor
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ....activations import ACT2FN
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ....modeling_utils import PreTrainedModel
from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_ernie_m import ErnieMConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = 'susnato/ernie-m-base_pytorch'
_CONFIG_FOR_DOC = 'ErnieMConfig'
_TOKENIZER_FOR_DOC = 'ErnieMTokenizer'

class ErnieMEmbeddings(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=config.pad_token_id)
        self.layer_norm = nn.LayerNorm(normalized_shape=config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        self.padding_idx = config.pad_token_id

    def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        if position_ids is None:
            input_shape = inputs_embeds.size()[:-1]
            ones = torch.ones(input_shape, dtype=torch.int64, device=inputs_embeds.device)
            seq_length = torch.cumsum(ones, dim=1)
            position_ids = seq_length - ones
            if past_key_values_length > 0:
                position_ids = position_ids + past_key_values_length
        position_ids += 2
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = inputs_embeds + position_embeddings
        embeddings = self.layer_norm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
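
# Illustrative sketch (not part of the original file): how `ErnieMEmbeddings.forward` derives
# position ids when none are given. The running index over the sequence is shifted by 2,
# matching the `position_ids += 2` offset above; guarded so it never runs on import.
if __name__ == '__main__':
    ones = torch.ones((1, 5), dtype=torch.int64)
    example_position_ids = torch.cumsum(ones, dim=1) - ones + 2
    print(example_position_ids)  # tensor([[2, 3, 4, 5, 6]])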

class ErnieMSelfAttention(nn.Module):

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.q_proj = nn.Linear(config.hidden_size, self.all_head_size)
        self.k_proj = nn.Linear(config.hidden_size, self.all_head_size)
        self.v_proj = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
        if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.q_proj(hidden_states)
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_value is not None:
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.k_proj(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.v_proj(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
            value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
            value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        use_cache = past_key_value is not None
        if self.is_decoder:
            past_key_value = (key_layer, value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
            (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2])
            if use_cache:
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
            if self.position_embedding_type == 'relative_key':
                relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == 'relative_key_query':
                relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
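
# Illustrative sketch (not part of the original file): `transpose_for_scores` reshapes
# (batch, seq_len, hidden_size) activations into (batch, num_heads, seq_len, head_dim) so that
# scores can be computed per attention head. The config values below are arbitrary and tiny.
if __name__ == '__main__':
    example_attn = ErnieMSelfAttention(ErnieMConfig(hidden_size=8, num_attention_heads=2))
    example_states = torch.randn(1, 4, 8)
    print(example_attn.transpose_for_scores(example_states).shape)  # torch.Size([1, 2, 4, 4])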

class ErnieMAttention(nn.Module):

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self_attn = ErnieMSelfAttention(config, position_embedding_type=position_embedding_type)
        self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        (heads, index) = find_pruneable_heads_and_indices(heads, self.self_attn.num_attention_heads, self.self_attn.attention_head_size, self.pruned_heads)
        self.self_attn.q_proj = prune_linear_layer(self.self_attn.q_proj, index)
        self.self_attn.k_proj = prune_linear_layer(self.self_attn.k_proj, index)
        self.self_attn.v_proj = prune_linear_layer(self.self_attn.v_proj, index)
        self.out_proj = prune_linear_layer(self.out_proj, index, dim=1)
        self.self_attn.num_attention_heads = self.self_attn.num_attention_heads - len(heads)
        self.self_attn.all_head_size = self.self_attn.attention_head_size * self.self_attn.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]:
        self_outputs = self.self_attn(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions)
        attention_output = self.out_proj(self_outputs[0])
        outputs = (attention_output,) + self_outputs[1:]
        return outputs

class ErnieMEncoderLayer(nn.Module):

    def __init__(self, config):
        super().__init__()
        dropout = 0.1 if config.hidden_dropout_prob is None else config.hidden_dropout_prob
        act_dropout = config.hidden_dropout_prob if config.act_dropout is None else config.act_dropout
        self.self_attn = ErnieMAttention(config)
        self.linear1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dropout = nn.Dropout(act_dropout)
        self.linear2 = nn.Linear(config.intermediate_size, config.hidden_size)
        self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        if isinstance(config.hidden_act, str):
            self.activation = ACT2FN[config.hidden_act]
        else:
            self.activation = config.hidden_act

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=True):
        residual = hidden_states
        if output_attentions:
            (hidden_states, attention_opt_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, past_key_value=past_key_value, output_attentions=output_attentions)
        else:
            hidden_states = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, past_key_value=past_key_value, output_attentions=output_attentions)
        hidden_states = residual + self.dropout1(hidden_states)
        hidden_states = self.norm1(hidden_states)
        residual = hidden_states
        hidden_states = self.linear1(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.linear2(hidden_states)
        hidden_states = residual + self.dropout2(hidden_states)
        hidden_states = self.norm2(hidden_states)
        if output_attentions:
            return (hidden_states, attention_opt_weights)
        else:
            return hidden_states

class ErnieMEncoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([ErnieMEncoderLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(self, input_embeds: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        hidden_states = () if output_hidden_states else None
        attentions = () if output_attentions else None
        output = input_embeds
        if output_hidden_states:
            hidden_states = hidden_states + (output,)
        for (i, layer) in enumerate(self.layers):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            (output, opt_attn_weights) = layer(hidden_states=output, attention_mask=attention_mask, head_mask=layer_head_mask, past_key_value=past_key_value)
            if output_hidden_states:
                hidden_states = hidden_states + (output,)
            if output_attentions:
                attentions = attentions + (opt_attn_weights,)
        last_hidden_state = output
        if not return_dict:
            return tuple((v for v in [last_hidden_state, hidden_states, attentions] if v is not None))
        return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=attentions)

class ErnieMPooler(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output

class ErnieMPreTrainedModel(PreTrainedModel):
    config_class = ErnieMConfig
    base_model_prefix = 'ernie_m'

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
ERNIE_M_START_DOCSTRING = '\n\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n    etc.)\n\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ErnieMConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
ERNIE_M_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`torch.LongTensor` of shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`ErnieMTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.max_position_embeddings - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"

@add_start_docstrings('The bare ErnieM Model transformer outputting raw hidden-states without any specific head on top.', ERNIE_M_START_DOCSTRING)
class ErnieMModel(ErnieMPreTrainedModel):

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.initializer_range = config.initializer_range
        self.embeddings = ErnieMEmbeddings(config)
        self.encoder = ErnieMEncoder(config)
        self.pooler = ErnieMPooler(config) if add_pooling_layer else None
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for (layer, heads) in heads_to_prune.items():
            self.encoder.layers[layer].self_attn.prune_heads(heads)

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time.')
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
        if attention_mask is None:
            attention_mask = (input_ids == self.config.pad_token_id).to(torch.float32)
            attention_mask *= torch.finfo(attention_mask.dtype).min
            if past_key_values is not None:
                batch_size = past_key_values[0][0].shape[0]
                past_mask = torch.zeros([batch_size, 1, 1, past_key_values_length], dtype=attention_mask.dtype)
                attention_mask = torch.concat([past_mask, attention_mask], dim=-1)
        elif attention_mask.ndim == 2:
            attention_mask = attention_mask.to(torch.float32)
            attention_mask = 1.0 - attention_mask
            attention_mask *= torch.finfo(attention_mask.dtype).min
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)
        embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
        encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, past_key_values=past_key_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        if not return_dict:
            sequence_output = encoder_outputs[0]
            pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
            return (sequence_output, pooler_output) + encoder_outputs[1:]
        sequence_output = encoder_outputs['last_hidden_state']
        pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
        hidden_states = None if not output_hidden_states else encoder_outputs['hidden_states']
        attentions = None if not output_attentions else encoder_outputs['attentions']
        return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooler_output, hidden_states=hidden_states, attentions=attentions)
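
# Illustrative sketch (not part of the original file): a forward pass through a tiny, randomly
# initialized ErnieMModel. All hyper-parameters are arbitrary and chosen to keep the example
# fast; no pretrained weights are involved, and the block never runs on import.
if __name__ == '__main__':
    example_config = ErnieMConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64, max_position_embeddings=64)
    example_model = ErnieMModel(example_config).eval()
    example_input_ids = torch.randint(2, 100, (1, 8))
    with torch.no_grad():
        example_outputs = example_model(example_input_ids)
    print(example_outputs.last_hidden_state.shape)  # torch.Size([1, 8, 32])
    print(example_outputs.pooler_output.shape)  # torch.Size([1, 32])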

@add_start_docstrings('ErnieM Model transformer with a sequence classification/regression head on top (a linear layer on top of\n    the pooled output) e.g. for GLUE tasks.', ERNIE_M_START_DOCSTRING)
class ErnieMForSequenceClassification(ErnieMPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.ernie_m = ErnieMModel(config)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.Tensor]]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=True, labels: Optional[torch.Tensor]=None) -> Union[Tuple[torch.FloatTensor], SequenceClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.ernie_m(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
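
# Illustrative sketch (not part of the original file): when `problem_type` is unset, the head
# infers it from `num_labels` and the label dtype; integer labels with more than one class select
# single-label classification and a cross-entropy loss. Sizes below are arbitrary and tiny.
if __name__ == '__main__':
    example_config = ErnieMConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64, max_position_embeddings=64, num_labels=3)
    example_classifier = ErnieMForSequenceClassification(example_config).eval()
    example_output = example_classifier(torch.randint(2, 100, (2, 8)), labels=torch.tensor([0, 2]))
    print(example_config.problem_type)  # single_label_classification
    print(example_output.logits.shape)  # torch.Size([2, 3])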

@add_start_docstrings('ErnieM Model with a multiple choice classification head on top (a linear layer on top of\n    the pooled output and a softmax) e.g. for RocStories/SWAG tasks.', ERNIE_M_START_DOCSTRING)
class ErnieMForMultipleChoice(ErnieMPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.ernie_m = ErnieMModel(config)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
    @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=True) -> Union[Tuple[torch.FloatTensor], MultipleChoiceModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
        outputs = self.ernie_m(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
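
# Illustrative sketch (not part of the original file): the multiple-choice head takes inputs of
# shape (batch_size, num_choices, sequence_length), flattens them before the encoder and reshapes
# the per-choice logits back to (batch_size, num_choices). Sizes below are arbitrary and tiny.
if __name__ == '__main__':
    example_config = ErnieMConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64, max_position_embeddings=64)
    example_mc_model = ErnieMForMultipleChoice(example_config).eval()
    example_mc_inputs = torch.randint(2, 100, (2, 4, 8))  # 2 examples, 4 choices, 8 tokens each
    with torch.no_grad():
        example_mc_logits = example_mc_model(example_mc_inputs).logits
    print(example_mc_logits.shape)  # torch.Size([2, 4])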

@add_start_docstrings('ErnieM Model with a token classification head on top (a linear layer on top of\n    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.', ERNIE_M_START_DOCSTRING)
class ErnieMForTokenClassification(ErnieMPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.Tensor]]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=True, labels: Optional[torch.Tensor]=None) -> Union[Tuple[torch.FloatTensor], TokenClassifierOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.ernie_m(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

@add_start_docstrings('ErnieM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).', ERNIE_M_START_DOCSTRING)
class ErnieMForQuestionAnswering(ErnieMPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=True) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.ernie_m(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        (start_logits, end_logits) = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return (total_loss,) + output if total_loss is not None else output
        return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
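
# Illustrative sketch (not part of the original file): the extractive QA head produces one start
# and one end logit per token, which are then scored against the gold span positions. Sizes below
# are arbitrary and tiny; no pretrained weights are involved.
if __name__ == '__main__':
    example_config = ErnieMConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64, max_position_embeddings=64)
    example_qa_model = ErnieMForQuestionAnswering(example_config).eval()
    with torch.no_grad():
        example_qa_output = example_qa_model(torch.randint(2, 100, (1, 8)))
    print(example_qa_output.start_logits.shape, example_qa_output.end_logits.shape)  # torch.Size([1, 8]) torch.Size([1, 8])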

@add_start_docstrings('ErnieMForInformationExtraction is a Ernie-M Model with two linear layer on top of the hidden-states output to\n    compute `start_prob` and `end_prob`, designed for Universal Information Extraction.', ERNIE_M_START_DOCSTRING)
class ErnieMForInformationExtraction(ErnieMPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.ernie_m = ErnieMModel(config)
        self.linear_start = nn.Linear(config.hidden_size, 1)
        self.linear_end = nn.Linear(config.hidden_size, 1)
        self.sigmoid = nn.Sigmoid()
        self.post_init()

    @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=True) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
        result = self.ernie_m(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        if return_dict:
            sequence_output = result.last_hidden_state
        else:
            sequence_output = result[0]
        start_logits = self.linear_start(sequence_output)
        start_logits = start_logits.squeeze(-1)
        end_logits = self.linear_end(sequence_output)
        end_logits = end_logits.squeeze(-1)
        total_loss = None
        if start_positions is not None and end_positions is not None:
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = BCEWithLogitsLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + result[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=result.hidden_states, attentions=result.attentions)

# File: transformers-main/src/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py
""""""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ....tokenization_utils import PreTrainedTokenizer
from ....utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}
RESOURCE_FILES_NAMES = {'sentencepiece_model_file': 'sentencepiece.bpe.model', 'vocab_file': 'vocab.txt'}

class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ['input_ids']
    vocab_files_names = VOCAB_FILES_NAMES
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding='utf8', unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for (k, v) in self.vocab.items()}
        super().__init__(do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

    def get_offset_mapping(self, text):
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        (normalized_text, char_mapping) = ('', [])
        for (i, ch) in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize('NFKC', ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        (text, token_mapping, offset) = (normalized_text, [], 0)
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == '▁':
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return ''.join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get('enable_sampling') is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('alpha') is not None:
            alpha = self.sp_model_kwargs.get('alpha')
        if self.sp_model_kwargs.get('nbest_size') is not None:
            nbest_size = self.sp_model_kwargs.get('nbest_size')
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for (pi, piece) in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for (i, chunk) in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and (not piece[i - 1].isdigit()):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
        return [1] + [0] * len(token_ids_0) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        if token_ids_1 is None:
            return (len(token_ids_0) + 2) * [0]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if '\u4e00' <= char <= '\u9fff':
            return True
        return False

    def is_alpha(self, char):
        if 'a' <= char <= 'z' or 'A' <= char <= 'Z':
            return True
        return False

    def is_punct(self, char):
        if char in ',;:.?!~,;:。?!《》【】':
            return True
        return False

    def is_whitespace(self, char):
        if char == ' ' or char == '\t' or char == '\n' or (char == '\r'):
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == 'Zs':
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, 'r', encoding='utf-8') as f:
            for (index, line) in enumerate(f):
                token = line.rstrip('\n')
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        tokenizer_model_file = os.path.join(save_directory, 'sentencepiece.bpe.model')
        with open(tokenizer_model_file, 'wb') as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
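
# Illustrative sketch (not part of the original file): the pair-of-sequences layout produced by
# `build_inputs_with_special_tokens` is [CLS] A [SEP] [SEP] B [SEP]. Loading the tokenizer this
# way assumes network access and that the 'susnato/ernie-m-base_pytorch' checkpoint referenced in
# the modeling docstrings still hosts the sentencepiece files.
if __name__ == '__main__':
    example_tokenizer = ErnieMTokenizer.from_pretrained('susnato/ernie-m-base_pytorch')
    example_encoding = example_tokenizer('How are you?', 'I am fine.')
    print(example_tokenizer.convert_ids_to_tokens(example_encoding['input_ids']))
    # ['[CLS]', ..., '[SEP]', '[SEP]', ..., '[SEP]']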

# File: transformers-main/src/transformers/models/deprecated/gptsan_japanese/__init__.py
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available
_import_structure = {'configuration_gptsan_japanese': ['GPTSanJapaneseConfig'], 'tokenization_gptsan_japanese': ['GPTSanJapaneseTokenizer']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_gptsan_japanese'] = ['GPTSanJapaneseForConditionalGeneration', 'GPTSanJapaneseModel', 'GPTSanJapanesePreTrainedModel']
    _import_structure['tokenization_gptsan_japanese'] = ['GPTSanJapaneseTokenizer']
if TYPE_CHECKING:
    from .configuration_gptsan_japanese import GPTSanJapaneseConfig
    from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gptsan_japanese import GPTSanJapaneseForConditionalGeneration, GPTSanJapaneseModel, GPTSanJapanesePreTrainedModel
        from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)

# File: transformers-main/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py
""""""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = 'gptsan-japanese'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__(self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-05, router_bias=False, router_jitter_noise=0.0, router_dtype='float32', router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs)
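
# Illustrative sketch (not part of the original file): the total layer count is the sum of the
# switch-MoE layers and the optional extra dense layers, and `hidden_size`/`num_attention_heads`
# are aliases of `d_model`/`num_heads` through `attribute_map`. Values below are arbitrary.
if __name__ == '__main__':
    example_config = GPTSanJapaneseConfig(num_switch_layers=4, num_ext_layers=2, d_model=256, num_heads=4)
    print(example_config.num_layers)  # 6
    print(example_config.hidden_size, example_config.num_attention_heads)  # 256 4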

# File: transformers-main/src/transformers/models/deprecated/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py
""""""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch

def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, 'parameters.json')
    with open(parameter_file) as f:
        params = json.load(f)
    if not params:
        raise ValueError(f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.')
    if not args.output.endswith('.pt'):
        args.output = args.output + '.pt'
    new_state = OrderedDict()
    with tf.device('/CPU:0'):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith('/adam_m') or key_name.endswith('/adam_v'):
                continue
            if key_name.startswith('pasts/'):
                if key_name.startswith('pasts/mlp'):
                    player = int(key_name[9])
                elif key_name.startswith('pasts/out'):
                    player = 8
                name = 'model.sqout.%d.weight' % (player * 2)
                state = vnp.transpose([1, 0]).copy()
                new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/moe'):
                player = int(key_name[9:].split('/')[0])
                if key_name.endswith('/switch_gating/kernel'):
                    name = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
                    state = vnp.transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/softmlp/kernel'):
                    name = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
                    state = vnp.transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/wo/kernel') or key_name.endswith('/wi/kernel'):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
                        state = vnp[i].transpose([1, 0]).copy()
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/mlp'):
                player = int(key_name[9:].split('/')[0])
                if key_name.endswith('/p1/kernel'):
                    name = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player
                    state = vnp.transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/p1/bias'):
                    name = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/p2/kernel'):
                    name = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player
                    state = vnp.transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/p2/bias'):
                    name = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/ln'):
                player = int(key_name[8:].split('/')[0])
                if key_name.endswith('/b'):
                    name = 'model.blocks.%d.feed_forward.norm.bias' % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/g'):
                    name = 'model.blocks.%d.feed_forward.norm.weight' % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/att'):
                player = int(key_name[9:].split('/')[0])
                if key_name.endswith('/qkv/kernel'):
                    state = vnp.copy()
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]]).transpose([1, 0]).copy()
                    state_k = state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]]).transpose([1, 0]).copy()
                    state_v = state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]]).transpose([1, 0]).copy()
                    name = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
                    new_state[name] = torch.tensor(state_q)
                    name = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
                    new_state[name] = torch.tensor(state_k)
                    name = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith('/o/kernel'):
                    name = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
                    state = vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/an'):
                player = int(key_name[8:].split('/')[0])
                if key_name.endswith('/b'):
                    name = 'model.blocks.%d.self_attn.norm.bias' % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/g'):
                    name = 'model.blocks.%d.self_attn.norm.weight' % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/wte') or key_name.startswith('model/wpe') or key_name.startswith('model/ete'):
                nlayer = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[key_name[-3:]]
                name = 'model.%s.weight' % nlayer
                state = vnp.copy()
                new_state[name] = torch.tensor(state)
                if key_name.startswith('model/wte'):
                    name = 'lm_head.weight'
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/wob'):
                name = 'final_logits_bias'
                state = vnp.copy()
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == 'model/dense/kernel':
                name = 'model.last_project.weight'
                state = vnp.transpose([1, 0]).copy()
                new_state[name] = torch.tensor(state)
            elif key_name == 'model/dense_1/bias':
                name = 'model.last_project.bias'
                state = vnp.copy()
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
    parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
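
# Example invocation of the converter above (paths are placeholders):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir /path/to/tf_checkpoint --output /path/to/gptsan_japanese.pt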

# File: transformers-main/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
""""""
import copy
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
from ....activations import ACT2FN
from ....modeling_outputs import MoECausalLMOutputWithPast, MoEModelOutputWithPastAndCrossAttentions
from ....modeling_utils import PreTrainedModel
from ....utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging
from .configuration_gptsan_japanese import GPTSanJapaneseConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = 'GPTSanJapaneseConfig'
_CHECKPOINT_FOR_DOC = 'Tanrei/GPTSAN-japanese'

def router_z_loss_func(router_logits: torch.Tensor) -> torch.Tensor:
    (num_groups, tokens_per_group, _) = router_logits.shape
    log_z = torch.logsumexp(router_logits, dim=-1)
    z_loss = log_z ** 2
    return torch.sum(z_loss) / (num_groups * tokens_per_group)

def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> torch.Tensor:
    num_experts = router_probs.shape[-1]
    if expert_indices.dtype != torch.int64:
        expert_indices = expert_indices.to(torch.int64)
    if len(expert_indices.shape) == 2:
        expert_indices = expert_indices.unsqueeze(2)
    expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts)
    expert_mask = torch.max(expert_mask, axis=-2).values
    expert_mask = expert_mask.to(torch.float32)
    tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2)
    router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2)
    return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * num_experts ** 2
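
# Illustrative sketch (not part of the original file): how the two auxiliary MoE losses
# above behave on dummy router outputs. router_logits has shape
# (num_groups, tokens_per_group, num_experts), matching the functions' expectations.
def _example_router_aux_losses():
    torch.manual_seed(0)
    router_logits = torch.randn(2, 8, 4)  # 2 groups, 8 tokens each, 4 experts
    router_probs = torch.softmax(router_logits, dim=-1)
    expert_indices = torch.argmax(router_probs, dim=-1)  # top-1 expert per token
    z_loss = router_z_loss_func(router_logits)  # penalizes large router logits
    aux_loss = load_balancing_loss_func(router_probs, expert_indices)  # encourages balanced expert usage
    return z_loss, aux_loss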

class GPTSanJapaneseDenseActDense(nn.Module):

    def __init__(self, config: GPTSanJapaneseConfig, ext_layer=False):
        super().__init__()
        d_inter = config.d_ext if ext_layer else config.d_ff
        self.wi = nn.Linear(config.d_model, d_inter, bias=ext_layer)
        self.wo = nn.Linear(d_inter, config.d_model, bias=ext_layer)
        self.dropout = nn.Identity() if ext_layer else nn.Dropout(config.dropout_rate)
        self.act = ACT2FN['swish' if ext_layer else 'relu']

    def forward(self, hidden_states):
        hidden_states = self.wi(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states

class GPTSanJapaneseTop1Router(nn.Module):

    def __init__(self, config: GPTSanJapaneseConfig):
        super().__init__()
        self.num_experts = config.num_experts
        self.expert_capacity = config.expert_capacity
        self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias)
        self.jitter_noise = config.router_jitter_noise
        self.ignore_padding_tokens = config.router_ignore_padding_tokens
        self.dtype = getattr(torch, config.router_dtype)

    def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        self.input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(self.dtype)
        if self.training and self.jitter_noise > 0:
            hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
        self._cast_classifier()
        router_logits = self.classifier(hidden_states)
        router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype)
        return (router_probabilities, router_logits)

    def _cast_classifier(self):
        if not (hasattr(self.classifier, 'SCB') or hasattr(self.classifier, 'CB')):
            self.classifier = self.classifier.to(self.dtype)

    def forward(self, hidden_states: torch.Tensor) -> Tuple:
        (router_probs, router_logits) = self._compute_router_probabilities(hidden_states)
        expert_index = torch.argmax(router_probs, dim=-1)
        expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts)
        token_priority = torch.cumsum(expert_index, dim=-2)
        expert_capacity_mask = token_priority <= self.expert_capacity
        expert_index = expert_index * expert_capacity_mask
        router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1)
        return (expert_index, router_probs, router_logits)
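
# Illustrative sketch (not part of the original file): the three outputs of the top-1
# router above. `config` is assumed to be a GPTSanJapaneseConfig (or any object exposing
# the attributes read in __init__: num_experts, expert_capacity, hidden_size, router_bias,
# router_jitter_noise, router_ignore_padding_tokens, router_dtype).
def _example_top1_router(config):
    router = GPTSanJapaneseTop1Router(config)
    hidden_states = torch.randn(1, 6, config.hidden_size)  # (batch, tokens, hidden)
    expert_mask, router_probs, router_logits = router(hidden_states)
    # expert_mask: (1, 6, num_experts) one-hot mask, zeroed once an expert exceeds its capacity
    # router_probs: (1, 6, 1) probability of the selected expert
    # router_logits: (1, 6, num_experts) raw classifier outputs
    return expert_mask, router_probs, router_logits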

class GPTSanJapaneseSparseMLP(nn.Module):

    def __init__(self, config: GPTSanJapaneseConfig, expert_class: nn.Module=GPTSanJapaneseDenseActDense):
        super().__init__()
        self.router = GPTSanJapaneseTop1Router(config)
        self.experts = nn.ModuleDict()
        for idx in range(config.num_experts):
            self.experts[f'expert_{idx}'] = expert_class(config)

    def forward(self, hidden_states):
        (router_mask, router_probs, router_logits) = self.router(hidden_states)
        expert_index = torch.argmax(router_mask, dim=-1)
        next_states = hidden_states.clone()
        for (idx, expert) in enumerate(self.experts.values()):
            token_indices = router_mask[:, :, idx].bool()
            next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype)
        hidden_states = router_probs * next_states
        return (hidden_states, (router_logits, expert_index))

class GPTSanJapaneseLayerSparseFF(nn.Module):

    def __init__(self, config: GPTSanJapaneseConfig):
        super().__init__()
        self.mlp = GPTSanJapaneseSparseMLP(config)
        self.soft_bypass_mlp = nn.Linear(config.d_model, config.d_model, bias=False)
        self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)

    def forward(self, hidden_states, output_router_logits):
        (forwarded_states, router_tuple) = self.mlp(hidden_states)
        forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states))
        output = hidden_states + self.norm(forwarded_states)
        if output_router_logits and router_tuple is not None:
            return (output, router_tuple)
        else:
            return output

class GPTSanJapaneseLayerDenseFF(nn.Module):

    def __init__(self, config: GPTSanJapaneseConfig):
        super().__init__()
        self.mlp = GPTSanJapaneseDenseActDense(config, ext_layer=True)
        self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)

    def forward(self, hidden_states):
        forwarded_states = self.mlp(hidden_states)
        output = hidden_states + self.norm(forwarded_states)
        return output

class GPTSanJapaneseAttention(nn.Module):

    def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[GPTSanJapaneseConfig]=None):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
        self.scaling = self.head_dim ** (-0.5)
        self.is_decoder = is_decoder
        self.is_causal = is_causal
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        is_cross_attention = key_value_states is not None
        (bsz, tgt_len, _) = hidden_states.size()
        query_states = self.q_proj(hidden_states) * self.scaling
        if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]):
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            past_key_value = (key_states, value_states)
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}')
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}')
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}')
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if output_attentions:
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}')
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)
        return (attn_output, attn_weights_reshaped, past_key_value)
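
# Illustrative sketch (not part of the original file): output shapes of the attention
# module above for a small hypothetical configuration.
def _example_attention_shapes():
    attn = GPTSanJapaneseAttention(embed_dim=16, num_heads=4, is_decoder=True)
    hidden = torch.randn(2, 5, 16)
    attn_output, attn_weights, past_key_value = attn(hidden, output_attentions=True)
    # attn_output: (2, 5, 16); attn_weights: (2, 4, 5, 5)
    # past_key_value: two tensors of shape (2, 4, 5, 4) = (bsz, num_heads, seq_len, head_dim)
    return attn_output, attn_weights, past_key_value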

class GPTSanJapaneseLayerSelfAttention(nn.Module):

    def __init__(self, config, has_relative_attention_bias=False):
        super().__init__()
        self.self_attn = GPTSanJapaneseAttention(embed_dim=config.d_model, num_heads=config.num_heads, is_decoder=True, bias=has_relative_attention_bias)
        self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]], past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        atten_out = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=(1 - attention_mask) * torch.finfo(hidden_states.dtype).min, layer_head_mask=head_mask, output_attentions=output_attentions)
        if output_attentions:
            attn_weights = (atten_out[1],)
        else:
            attn_weights = ()
        attention_output = atten_out[0]
        hidden = hidden_states + self.norm(attention_output)
        if use_cache:
            outputs = (hidden, atten_out[2])
        else:
            outputs = (hidden,)
        return outputs + attn_weights
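
# Illustrative sketch (not part of the original file): GPTSanJapaneseLayerSelfAttention
# expects a 0/1 attention mask (broadcastable to (batch, 1, tgt_len, src_len)) and turns
# it into an additive bias with (1 - mask) * torch.finfo(dtype).min, so masked positions
# receive a very large negative value before the softmax.
def _example_additive_attention_bias():
    mask = torch.tensor([[[[1.0, 1.0, 0.0]]]])  # last key position is padding
    bias = (1 - mask) * torch.finfo(torch.float32).min
    return bias  # tensor([[[[0., 0., -3.4028e+38]]]])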

class GPTSanJapaneseBlock(nn.Module):

    def __init__(self, config, ext_layer=False):
        super().__init__()
        self.self_attn = GPTSanJapaneseLayerSelfAttention(config)
        self.feed_forward = GPTSanJapaneseLayerDenseFF(config) if ext_layer else GPTSanJapaneseLayerSparseFF(config)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]], past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, output_router_tuple: Optional[bool]=False) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        atten_out = self.self_attn(hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions)
        attention_output = atten_out[0]
        if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF):
            sparse_out = self.feed_forward(attention_output, output_router_tuple)
            if output_router_tuple:
                (hidden, router_tuple) = sparse_out
            else:
                hidden = sparse_out
        else:
            hidden = self.feed_forward(attention_output)
        outputs = (hidden,) + atten_out[1:]
        if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF) and output_router_tuple:
            outputs += (router_tuple,)
        return outputs

class GPTSanJapanesePreTrainedModel(PreTrainedModel):
    config_class = GPTSanJapaneseConfig
    base_model_prefix = 'gptsan_japanese'
    supports_gradient_checkpointing = False
    _no_split_modules = ['GPTSanJapaneseBlock']
    _skip_keys_device_placement = 'past_key_values'

    @property
    def dummy_inputs(self):
        input_ids = torch.tensor(DUMMY_INPUTS)
        input_mask = torch.tensor(DUMMY_MASK)
        dummy_inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        return dummy_inputs

    def _init_weights(self, module):
        factor = self.config.initializer_factor
        if isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(factor * 1.0)
            module.bias.data.zero_()
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5))
            if hasattr(module, 'bias') and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=factor * 1.0)
        elif isinstance(module, GPTSanJapaneseModel):
            module.embed_tokens.weight.data.normal_(mean=0.0, std=factor * 1.0)
            module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0)
            if hasattr(module, 'extra_position_embeddings') and module.extra_position_embeddings is not None:
                module.extra_position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0)
        elif isinstance(module, (GPTSanJapaneseModel, GPTSanJapaneseForConditionalGeneration)):
            module.final_logits_bias.data.normal_(mean=0.0, std=factor * 1.0)
            if hasattr(module, 'lm_head') and (not self.config.tie_word_embeddings):
                module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
        elif isinstance(module, GPTSanJapaneseDenseActDense):
            module.wi.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5))
            if hasattr(module.wi, 'bias') and module.wi.bias is not None:
                module.wi.bias.data.zero_()
            module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5))
            if hasattr(module.wo, 'bias') and module.wo.bias is not None:
                module.wo.bias.data.zero_()
        elif isinstance(module, GPTSanJapaneseAttention):
            d_model = self.config.d_model
            key_value_proj_dim = self.config.d_model
            n_heads = self.config.num_heads
            module.k_proj.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5))
            module.v_proj.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5))
            module.q_proj.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5))
            module.out_proj.weight.data.normal_(mean=0.0, std=factor * (n_heads * key_value_proj_dim) ** (-0.5))
        elif isinstance(module, GPTSanJapaneseSparseMLP):
            d_model = self.config.d_model
            key_value_proj_dim = self.config.d_model
            n_heads = self.config.num_heads
            module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1)
            for idx in range(self.config.num_experts):
                module.experts[f'expert_{idx}'].wi.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))
                module.experts[f'expert_{idx}'].wo.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))

    def _shift_right(self, input_ids):
        decoder_start_token_id = self.config.decoder_start_token_id
        pad_token_id = self.config.pad_token_id
        if decoder_start_token_id is None:
            raise ValueError('self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information.')
        if is_torch_fx_proxy(input_ids):
            shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
            shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
        else:
            shifted_input_ids = input_ids.new_zeros(input_ids.shape)
            shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
            shifted_input_ids[..., 0] = decoder_start_token_id
        if pad_token_id is None:
            raise ValueError('self.model.config.pad_token_id has to be defined.')
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
        return shifted_input_ids
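
# Illustrative sketch (not part of the original file): effect of the label shifting
# performed by _shift_right above, assuming hypothetical decoder_start_token_id=0 and
# pad_token_id=0. The ignore index -100 is replaced by the pad id after shifting.
def _example_shift_right():
    labels = torch.tensor([[5, -100, 7, 8]])
    decoder_start_token_id, pad_token_id = 0, 0
    shifted = labels.new_zeros(labels.shape)
    shifted[..., 1:] = labels[..., :-1].clone()
    shifted[..., 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted  # tensor([[0, 5, 0, 7]])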
GPTSAN_JAPANESE_START_DOCSTRING = '\n\n    The [GPTSAN-japanese](https://github.com/tanreinama/GPTSAN) model was proposed in General-purpose Swich transformer\n    based Japanese language model\n\n    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n    and behavior.\n\n    Parameters:\n        config ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
GPTSAN_JAPANESE_INPUTS_DOCSTRING = "\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary. GPTSAN-japanese is a model that generates sentence\n            continuations or predicts tokens at mask positions. Special tokens required for inputs to the model are\n            automatically appended.\n        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            An input that masks the Prefix part in the Prefix-LM input. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **prefix** input,\n            - 0 for tokens that are **not-prefix** input.\n        spout (`torch.Tensor` of shape `(batch_size, config.d_spout)`):\n                This vector is transformed through an 8-layer FFN and can be used instead of `past_key_values`.\n        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n            `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n        use_cache (`bool`, *optional*):\n            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n            `past_key_values`).\n        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded\n            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be\n            input (see `past_key_values`). This is useful if you want more control over how to convert\n            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n        router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):\n            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.\n            Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models.\n"

@add_start_docstrings('The bare GPTSAN-japanese Model transformer outputting raw hidden-states without any specific head on top.', GPTSAN_JAPANESE_START_DOCSTRING)
class GPTSanJapaneseModel(GPTSanJapanesePreTrainedModel):

    def __init__(self, config: GPTSanJapaneseConfig):
        super().__init__(config)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
        self.config = copy.deepcopy(config)
        self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
        self.last_project = nn.Linear(config.d_model, config.d_model, bias=True)
        self.act = ACT2FN['swish']
        self.blocks = torch.nn.ModuleList([])
        for _ in range(config.num_switch_layers):
            self.blocks.append(GPTSanJapaneseBlock(config))
        for _ in range(config.num_ext_layers):
            self.blocks.append(GPTSanJapaneseBlock(config, ext_layer=True))
        if config.num_ext_layers > 0:
            self.extra_position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
        if config.d_spout:
            spouts = []
            for _ in range(8):
                spouts.append(nn.Linear(config.d_spout, config.d_spout, bias=False))
                spouts.append(nn.Tanh())
            spouts.append(nn.Linear(config.d_spout, config.num_layers * 2 * config.d_model, bias=False))
            self.spout = nn.Sequential(*spouts)
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, new_embeddings):
        self.embed_tokens = new_embeddings

    @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.FloatTensor]=None, spout: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, output_router_logits: Optional[bool]=None, num_precontext: Optional[torch.LongTensor]=None) -> Union[MoEModelOutputWithPastAndCrossAttentions, Tuple[torch.FloatTensor]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        device = self.position_embeddings.weight.device
        if input_ids is None:
            input_ids = torch.zeros([1, 1]).int().to(device)
        if inputs_embeds is not None:
            raise NotImplementedError('GPTSanJapaneseModel does not use `inputs_embeds`. Make sure to pass in `input_ids` instead.')
        num_pasts_contexts = 0
        num_batch = input_ids.shape[0]
        pasts_or_spout_value = None
        if past_key_values is not None:
            num_pasts_contexts = past_key_values[0][0].shape[2]
        elif self.config.d_spout and spout is not None:
            num_pasts_contexts += 1
        if self.config.d_spout and spout is not None and (attention_mask is not None):
            attention_mask_with_spout = torch.ones(num_batch, attention_mask.shape[1] + 1, device=device)
            attention_mask_with_spout[:, 1:] -= 1 - attention_mask
            attention_mask = attention_mask_with_spout
        if num_precontext is not None:
            if not (len(num_precontext.shape) == 2 and num_precontext.shape[1] == 1):
                raise ValueError('num_precontext should be [batch, 1] size.')
            num_precontext = torch.reshape(num_precontext, [-1])
        else:
            num_precontext = torch.zeros([num_batch]).int().to(device)
        num_input_contexts = input_ids.shape[1]
        num_output_contexts = num_input_contexts + num_pasts_contexts
        hidden_states = self.embed_tokens(input_ids)
        if past_key_values is not None:
            pasts_or_spout_value = past_key_values
        elif self.config.d_spout and spout is not None:
            pasts_or_spout_value = self.spout(spout)
            pasts_or_spout_value = torch.reshape(pasts_or_spout_value, [num_batch, self.config.num_layers, 2, self.config.num_heads, num_pasts_contexts, self.config.d_model // self.config.num_heads])
            pasts_or_spout_value = torch.split(pasts_or_spout_value, [1] * self.config.num_layers, dim=1)
            pasts_or_spout_value = tuple((tuple([b.squeeze(1) for b in torch.split(a.squeeze(1), [1, 1], dim=1)]) for a in pasts_or_spout_value))
        else:
            pasts_or_spout_value = [None] * self.config.num_layers
        token_position = torch.arange(num_input_contexts).to(device) + num_pasts_contexts
        if attention_mask is None:
            attention_mask = torch.ones(num_batch, num_input_contexts, device=device)
        # Build per-token position indices: start from the absolute positions (offset by the
        # number of past/spout contexts), re-base each sequence so counting starts at its first
        # non-padded token, then clip into the valid range of the position embedding table.
        gather_position = (torch.zeros((num_batch, self.config.d_model, num_input_contexts)).to(device) + token_position.unsqueeze(0)).transpose(1, 2).long()
        gather_position -= (1 - attention_mask).argmin(dim=-1).unsqueeze(1).unsqueeze(2)
        gather_position = torch.clip(gather_position, num_pasts_contexts, self.config.max_position_embeddings - 1)
        # Add the gathered position embeddings to the token embeddings, one sample at a time.
        for i in range(num_batch):
            hidden_states[i] += torch.gather(self.position_embeddings.weight, dim=0, index=gather_position[i])
        causal_mask = torch.tril(torch.ones((num_output_contexts, num_output_contexts), dtype=torch.uint8)).view(1, 1, num_output_contexts, num_output_contexts).to(device)
        prefix_lm_mask = causal_mask[:, :, -num_input_contexts:, :]
        if token_type_ids is not None:
            token_type_ids = token_type_ids.unsqueeze(1).unsqueeze(2)
            prefix_lm_mask = (prefix_lm_mask + token_type_ids > 0).float()
        extended_attention_mask = prefix_lm_mask * attention_mask.unsqueeze(1).unsqueeze(2)
        if head_mask is not None:
            head_mask = self.get_head_mask(head_mask, self.config.num_switch_layers + self.config.num_ext_layers)
        present_key_value_states = () if self.config.use_cache or use_cache else None
        all_hidden_states = () if self.config.output_hidden_states or output_hidden_states else None
        all_attentions = () if self.config.output_attentions or output_attentions else None
        all_router_probs = () if self.config.output_router_logits or output_router_logits else None
        for (layer, past) in enumerate(pasts_or_spout_value):
            if layer == self.config.num_switch_layers:
                if self.config.num_ext_layers > 0:
                    for i in range(num_batch):
                        hidden_states[i] += torch.gather(self.extra_position_embeddings.weight, dim=0, index=gather_position[i])
            output_router_tuple = (self.config.output_router_logits or output_router_logits) and layer < self.config.num_switch_layers
            block_output = self.blocks[layer](hidden_states=hidden_states, past_key_value=past, attention_mask=extended_attention_mask, head_mask=head_mask, use_cache=self.config.use_cache or use_cache, output_attentions=self.config.output_attentions or output_attentions, output_router_tuple=output_router_tuple)
            outpos = 0
            hidden_states = block_output[outpos]
            if self.config.output_hidden_states or output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.config.use_cache or use_cache:
                outpos += 1
                present = block_output[outpos]
                present_key_value_states += (present,)
            if self.config.output_attentions or output_attentions:
                outpos += 1
                attention_probs = block_output[outpos]
                all_attentions += (attention_probs,)
            if output_router_tuple:
                outpos += 1
                router_tuple = block_output[outpos]
                all_router_probs += (router_tuple[0],)  # all_router_probs is a tuple, so concatenate rather than append
        hidden_states = self.last_project(hidden_states)
        hidden_states = self.act(hidden_states)
        if self.config.output_hidden_states or output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_router_probs] if v is not None))
        return MoEModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, router_probs=all_router_probs)
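
# Illustrative sketch (not part of the original file): how the prefix-LM mask built in
# GPTSanJapaneseModel.forward combines a causal mask with token_type_ids. Positions
# marked 1 in token_type_ids (the prefix) attend bidirectionally within the prefix,
# while the remaining positions stay causal.
def _example_prefix_lm_mask():
    seq_len = 4
    causal_mask = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.uint8)).view(1, 1, seq_len, seq_len)
    token_type_ids = torch.tensor([[1, 1, 0, 0]])  # first two tokens form the prefix
    prefix_lm_mask = (causal_mask + token_type_ids.unsqueeze(1).unsqueeze(2) > 0).float()
    # prefix_lm_mask[0, 0] == tensor([[1., 1., 0., 0.],
    #                                 [1., 1., 0., 0.],
    #                                 [1., 1., 1., 0.],
    #                                 [1., 1., 1., 1.]])
    return prefix_lm_mask[0, 0]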

@add_start_docstrings('The bare GPTSAN-japanese Model with a language modeling head.', GPTSAN_JAPANESE_START_DOCSTRING)
class GPTSanJapaneseForConditionalGeneration(GPTSanJapanesePreTrainedModel):
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config: GPTSanJapaneseConfig):
        super().__init__(config)
        self.model = GPTSanJapaneseModel(config)
        self.register_buffer('final_logits_bias', torch.zeros([1, config.vocab_size]))
        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
        if not self.config.torchscript:
            self.lm_head.weight = self.model.embed_tokens.weight

    @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.FloatTensor]=None, spout: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, output_router_logits: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.FloatTensor], MoECausalLMOutputWithPast]:
        SEG_TOKEN = self.config.separator_token_id
        use_cache = use_cache or self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        model_return_dict = True
        num_precontext = None
        if input_ids is not None:
            num_batch = input_ids.shape[0]
            num_precontext = torch.zeros([num_batch]).int().to(input_ids.device)
            where_separators = torch.where(input_ids == SEG_TOKEN)
            num_precontext[where_separators[0]] += where_separators[1]
            num_precontext = num_precontext.unsqueeze(1)
        outputs = self.model(input_ids, attention_mask, token_type_ids, spout, past_key_values, head_mask, use_cache, inputs_embeds, decoder_inputs_embeds, output_attentions, output_hidden_states, model_return_dict, output_router_logits, num_precontext)
        lm_logits = self.lm_head(outputs[0])
        if lm_logits.shape[-1] == self.final_logits_bias.shape[-1]:
            lm_logits = lm_logits + self.final_logits_bias
        loss = None
        z_loss = None
        router_probs = None
        aux_loss = None
        if labels is not None:
            labels = labels.to(lm_logits.device)
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
            if output_router_logits:
                (router_logits, expert_indexes) = self._unpack_router_logits(outputs.router_probs)
                z_loss = router_z_loss_func(router_logits)
                router_probs = nn.Softmax(dim=-1)(router_logits)
                aux_loss = load_balancing_loss_func(router_probs, expert_indexes)
            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
        if not return_dict:
            return tuple((v for v in [loss, lm_logits, outputs.past_key_values, outputs.hidden_states, outputs.router_probs, z_loss, aux_loss] if v is not None))
        return MoECausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_probs, z_loss=z_loss, aux_loss=aux_loss)

    def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, attention_mask: torch.FloatTensor, token_type_ids: Optional[torch.FloatTensor]=None, spout: Optional[Union[List, torch.FloatTensor]]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, **kwargs):
        if isinstance(spout, list):
            spout = torch.tensor(spout).float()
            if input_ids is not None:
                spout = spout.to(input_ids.device)
        if past_key_values is not None:
            return {'input_ids': input_ids[:, -1:] if input_ids is not None else None, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids[:, -1:] if token_type_ids is not None else None, 'spout': spout, 'past_key_values': past_key_values}
        return {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'spout': spout, 'past_key_values': None}

    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        return self._shift_right(labels)

    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None) -> nn.Embedding:
        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
        self._resize_final_logits_bias(new_embeddings.weight.shape[0])
        return new_embeddings

    def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
        old_num_tokens = self.final_logits_bias.shape[-1]
        if new_num_tokens <= old_num_tokens:
            new_bias = self.final_logits_bias[:, :new_num_tokens]
        else:
            extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
            new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
        self.register_buffer('final_logits_bias', new_bias)

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.model.set_input_embeddings(new_embeddings)

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def get_output_embeddings(self):
        return self.lm_head

    def _unpack_router_logits(self, router_outputs):
        total_router_logits = []
        total_expert_indexes = []
        for router_output in router_outputs:
            if len(router_output[0].shape) > 1:
                (router_logits, expert_indexes) = router_output
                total_router_logits.append(router_logits)
                total_expert_indexes.append(expert_indexes)
        return (torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1))
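
# Illustrative usage sketch (not part of the original file): loading the checkpoint named
# in _CHECKPOINT_FOR_DOC and generating a continuation. This assumes the (deprecated)
# weights and tokenizer are still available on the Hub and that the installed transformers
# version still ships these classes.
def _example_generation():
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
    model = GPTSanJapaneseForConditionalGeneration.from_pretrained("Tanrei/GPTSAN-japanese")
    inputs = tokenizer("織田信長は、", return_tensors="pt")
    generated = model.generate(inputs.input_ids, max_new_tokens=20)
    return tokenizer.decode(generated[0], skip_special_tokens=True)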

# File: transformers-main/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
""""""
import collections
import json
import os
import re
from typing import List, Optional, Tuple, Union
import numpy as np
from ....tokenization_utils import PreTrainedTokenizer
from ....tokenization_utils_base import BatchEncoding, PreTokenizedInput, PreTokenizedInputPair, TextInput, TextInputPair, TruncationStrategy
from ....utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}

def load_vocab_and_emoji(vocab_file, emoji_file):
    with open(emoji_file, 'r', encoding='utf-8') as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as f:
        token = f.readlines()
    token = [[t.rstrip('\n')] if t == ',\n' or ',' not in t else t.rstrip('\n').split(',') for t in token]
    for (idx, b) in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return (vocab, raw_vocab, ids_to_tokens, emoji)
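
# Illustrative sketch (hypothetical file contents, not part of the original file): how
# load_vocab_and_emoji parses a vocab file. Tokens written on the same comma-separated
# line share one id; raw_vocab keeps the joined line, ids_to_tokens keeps the list form,
# and the emoji file is plain JSON.
def _example_load_vocab(tmp_dir):
    vocab_path = os.path.join(tmp_dir, "vocab.txt")
    emoji_path = os.path.join(tmp_dir, "emoji.json")
    with open(vocab_path, "w", encoding="utf-8") as f:
        f.write("<|nottoken|>\nこんにちは,konnichiwa\n")
    with open(emoji_path, "w", encoding="utf-8") as f:
        json.dump({"emoji": {}, "emoji_inv": {}}, f)
    vocab, raw_vocab, ids_to_tokens, emoji = load_vocab_and_emoji(vocab_path, emoji_path)
    # vocab["こんにちは"] == vocab["konnichiwa"] == 1, raw_vocab["こんにちは,konnichiwa"] == 1
    return vocab, raw_vocab, ids_to_tokens, emoji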

class GPTSanJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ['input_ids', 'attention_mask', 'token_type_ids']

    def __init__(self, vocab_file, emoji_file, unk_token='<|nottoken|>', pad_token='<|separator|>', bos_token='<|startoftext|>', eos_token='<|endoftext|>', sep_token='<|segmenter|>', do_clean_text=False, **kwargs):
        if not os.path.isfile(vocab_file):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        (self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji) = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
        super().__init__(unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, do_clean_text=do_clean_text, **kwargs)

    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        words = []
        byte_tokens = []
        for word in tokens:
            if word[:6] == '<|byte' and word[-2:] == '|>':
                byte_tokens.append(int(word[6:-2]))
            else:
                if len(byte_tokens) > 0:
                    words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
                    byte_tokens = []
                if word[:7] == '<|emoji' and word[-2:] == '|>':
                    words.append(self.emoji['emoji_inv'][word])
                elif word == '':
                    words.append(' ')
                elif word == '
': words.append('\n') elif word == '': words.append('\t') elif word == '': words.append('▀') elif word == '': words.append('ǀ') elif word == '': words.append('‖') elif word == '<|bagoftoken|>': if len(words) > 0: words.append(words[-1]) words.append(words[-1]) words.append(words[-1]) elif word.startswith('<|') and word.endswith('|>'): words.append('') else: words.append(word) if len(byte_tokens) > 0: words.append(bytearray(byte_tokens).decode('utf-8', errors='replace')) text = ''.join(words) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) emoji_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] emoji_file = (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] with open(vocab_file, 'w', encoding='utf-8') as writer: for (token_index, token) in self.ids_to_tokens.items(): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(','.join(token) + '\n') index += 1 with open(emoji_file, 'w', encoding='utf-8') as writer: json.dump(self.emoji, writer) return (vocab_file, emoji_file) def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: prefix_len = 0 if self.sep_token in self.vocab: segid = self.vocab[self.sep_token] if segid in token_ids_0: prefix_len = token_ids_0.index(segid) if token_ids_1 is None: total_len = len(token_ids_0) else: total_len = len(token_ids_0 + token_ids_1) return prefix_len * [1] + (total_len - prefix_len) * [0] def prepare_for_tokenization(self, text, prefix_text=None, add_sep_token=None, **kwargs): if add_sep_token is None: add_sep_token = self.sep_token not in text prepared = self.bos_token if self.bos_token in self.vocab else '' prepared += prefix_text if prefix_text is not None else '' if add_sep_token: prepared += self.sep_token if self.sep_token in self.vocab else '' prepared += text return (prepared, kwargs) def _batch_encode_plus(self, batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: if isinstance(batch_text_or_text_pairs[0], tuple) or isinstance(tuple(batch_text_or_text_pairs[0]), list): batch_prefix_texts = [] for (pref, txt) in batch_text_or_text_pairs: batch_prefix_texts.append(pref + self.sep_token + txt) batch_text_or_text_pairs = batch_prefix_texts return super()._batch_encode_plus(batch_text_or_text_pairs, 
add_special_tokens, padding_strategy, truncation_strategy, max_length, stride, is_split_into_words, pad_to_multiple_of, return_tensors, return_token_type_ids, return_attention_mask, return_overflowing_tokens, return_special_tokens_mask, return_offsets_mapping, return_length, verbose, **kwargs) class SubWordJapaneseTokenizer: def __init__(self, vocab, ids_to_tokens, emoji): self.vocab = vocab self.ids_to_tokens = ids_to_tokens self.emoji = emoji self.maxlen = np.max([len(w) for w in self.vocab.keys()]) self.content_repatter1 = re.compile("(https?|ftp)(:\\/\\/[-_\\.!~*\\'()a-zA-Z0-9;\\/?:\\@&=\\+$,%#]+)") self.content_repatter2 = re.compile('[A-Za-z0-9\\._+]*@[\\-_0-9A-Za-z]+(\\.[A-Za-z]+)*') self.content_repatter3 = re.compile('[\\(]{0,1}[0-9]{2,4}[\\)\\-\\(]{0,1}[0-9]{2,4}[\\)\\-]{0,1}[0-9]{3,4}') self.content_repatter4 = re.compile('([12]\\d{3}[/\\-年])*(0?[1-9]|1[0-2])[/\\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\\d{1,2}|:|\\d{1,2}時|\\d{1,2}分|\\(日\\)|\\(月\\)|\\(火\\)|\\(水\\)|\\(木\\)|\\(金\\)|\\(土\\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*') self.content_repatter5 = re.compile('(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\\u32ff)\\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\\d{1,2}|:|\\d{1,2}時|\\d{1,2}分|\\(日\\)|\\(月\\)|\\(火\\)|\\(水\\)|\\(木\\)|\\(金\\)|\\(土\\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*') self.content_repatter6 = re.compile('((0|[1-9]\\d*|[1-9]\\d{0,2}(,\\d{3})+)*億)*((0|[1-9]\\d*|[1-9]\\d{0,2}(,\\d{3})+)*万)*((0|[1-9]\\d*|[1-9]\\d{0,2}(,\\d{3})+)*千)*(0|[1-9]\\d*|[1-9]\\d{0,2}(,\\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\\(税込\\)|\\(税抜\\)|\\+tax)*') keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' self.content_trans1 = str.maketrans({k: '' for k in keisen + blocks}) def __len__(self): return len(self.ids_to_tokens) def clean_text(self, content): content = self.content_repatter1.sub('', content) content = self.content_repatter2.sub('', content) content = self.content_repatter3.sub('', content) content = self.content_repatter4.sub('', content) content = self.content_repatter5.sub('', content) content = self.content_repatter6.sub('', content) content = content.translate(self.content_trans1) while '' in content: content = content.replace('', '') return content def tokenize(self, text, clean=False): text = text.replace(' ', '') text = text.replace('\u3000', '') text = text.replace('\r\n', '
') text = text.replace('\n', '
') text = text.replace('\r', '
') text = text.replace('\t', '') text = text.replace('—', 'ー') text = text.replace('−', 'ー') for (k, v) in self.emoji['emoji'].items(): if k in text: text = text.replace(k, v) if clean: text = self.clean_text(text) def check_simbol(x): e = x.encode() if len(x) == 1 and len(e) == 2: c = (int(e[0]) << 8) + int(e[1]) if c >= 49825 and c <= 49855 or (c >= 51072 and c <= 51075) or (c >= 51897 and c <= 52159) or (c >= 52352 and c <= 52642): return True return False def checku2e(x): e = x.encode() if len(x) == 1 and len(e) == 3: c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2]) if c >= 14844032 and c <= 14856319: return True return False pos = 0 result = [] while pos < len(text): end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3 candidates = [] for e in range(end, pos, -1): wd = text[pos:e] if wd in self.vocab: if wd[0] == '<' and len(wd) > 2: candidates = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e)) if len(candidates) > 0: (_, wd, e) = sorted(candidates, key=lambda x: x[0])[0] result.append(wd) pos = e else: end = pos + 1 wd = text[pos:end] if check_simbol(wd): result.append('') elif checku2e(wd): result.append('') else: for i in wd.encode('utf-8'): result.append('<|byte%d|>' % i) pos = end return result def convert_id_to_token(self, index): return self.ids_to_tokens[index][0] # File: transformers-main/src/transformers/models/deprecated/graphormer/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = {'configuration_graphormer': ['GraphormerConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_graphormer'] = ['GraphormerForGraphClassification', 'GraphormerModel', 'GraphormerPreTrainedModel'] if TYPE_CHECKING: from .configuration_graphormer import GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/graphormer/collating_graphormer.py from typing import Any, Dict, List, Mapping import numpy as np import torch from ....utils import is_cython_available, requires_backends if is_cython_available(): import pyximport pyximport.install(setup_args={'include_dirs': np.get_include()}) from . 
import algos_graphormer def convert_to_single_emb(x, offset: int=512): feature_num = x.shape[1] if len(x.shape) > 1 else 1 feature_offset = 1 + np.arange(0, feature_num * offset, offset, dtype=np.int64) x = x + feature_offset return x def preprocess_item(item, keep_features=True): requires_backends(preprocess_item, ['cython']) if keep_features and 'edge_attr' in item.keys(): edge_attr = np.asarray(item['edge_attr'], dtype=np.int64) else: edge_attr = np.ones((len(item['edge_index'][0]), 1), dtype=np.int64) if keep_features and 'node_feat' in item.keys(): node_feature = np.asarray(item['node_feat'], dtype=np.int64) else: node_feature = np.ones((item['num_nodes'], 1), dtype=np.int64) edge_index = np.asarray(item['edge_index'], dtype=np.int64) input_nodes = convert_to_single_emb(node_feature) + 1 num_nodes = item['num_nodes'] if len(edge_attr.shape) == 1: edge_attr = edge_attr[:, None] attn_edge_type = np.zeros([num_nodes, num_nodes, edge_attr.shape[-1]], dtype=np.int64) attn_edge_type[edge_index[0], edge_index[1]] = convert_to_single_emb(edge_attr) + 1 adj = np.zeros([num_nodes, num_nodes], dtype=bool) adj[edge_index[0], edge_index[1]] = True (shortest_path_result, path) = algos_graphormer.floyd_warshall(adj) max_dist = np.amax(shortest_path_result) input_edges = algos_graphormer.gen_edge_input(max_dist, path, attn_edge_type) attn_bias = np.zeros([num_nodes + 1, num_nodes + 1], dtype=np.single) item['input_nodes'] = input_nodes + 1 item['attn_bias'] = attn_bias item['attn_edge_type'] = attn_edge_type item['spatial_pos'] = shortest_path_result.astype(np.int64) + 1 item['in_degree'] = np.sum(adj, axis=1).reshape(-1) + 1 item['out_degree'] = item['in_degree'] item['input_edges'] = input_edges + 1 if 'labels' not in item: item['labels'] = item['y'] return item class GraphormerDataCollator: def __init__(self, spatial_pos_max=20, on_the_fly_processing=False): if not is_cython_available(): raise ImportError('Graphormer preprocessing needs Cython (pyximport)') self.spatial_pos_max = spatial_pos_max self.on_the_fly_processing = on_the_fly_processing def __call__(self, features: List[dict]) -> Dict[str, Any]: if self.on_the_fly_processing: features = [preprocess_item(i) for i in features] if not isinstance(features[0], Mapping): features = [vars(f) for f in features] batch = {} max_node_num = max((len(i['input_nodes']) for i in features)) node_feat_size = len(features[0]['input_nodes'][0]) edge_feat_size = len(features[0]['attn_edge_type'][0][0]) max_dist = max((len(i['input_edges'][0][0]) for i in features)) edge_input_size = len(features[0]['input_edges'][0][0][0]) batch_size = len(features) batch['attn_bias'] = torch.zeros(batch_size, max_node_num + 1, max_node_num + 1, dtype=torch.float) batch['attn_edge_type'] = torch.zeros(batch_size, max_node_num, max_node_num, edge_feat_size, dtype=torch.long) batch['spatial_pos'] = torch.zeros(batch_size, max_node_num, max_node_num, dtype=torch.long) batch['in_degree'] = torch.zeros(batch_size, max_node_num, dtype=torch.long) batch['input_nodes'] = torch.zeros(batch_size, max_node_num, node_feat_size, dtype=torch.long) batch['input_edges'] = torch.zeros(batch_size, max_node_num, max_node_num, max_dist, edge_input_size, dtype=torch.long) for (ix, f) in enumerate(features): for k in ['attn_bias', 'attn_edge_type', 'spatial_pos', 'in_degree', 'input_nodes', 'input_edges']: f[k] = torch.tensor(f[k]) if len(f['attn_bias'][1:, 1:][f['spatial_pos'] >= self.spatial_pos_max]) > 0: f['attn_bias'][1:, 1:][f['spatial_pos'] >= self.spatial_pos_max] = float('-inf') 
batch['attn_bias'][ix, :f['attn_bias'].shape[0], :f['attn_bias'].shape[1]] = f['attn_bias'] batch['attn_edge_type'][ix, :f['attn_edge_type'].shape[0], :f['attn_edge_type'].shape[1], :] = f['attn_edge_type'] batch['spatial_pos'][ix, :f['spatial_pos'].shape[0], :f['spatial_pos'].shape[1]] = f['spatial_pos'] batch['in_degree'][ix, :f['in_degree'].shape[0]] = f['in_degree'] batch['input_nodes'][ix, :f['input_nodes'].shape[0], :] = f['input_nodes'] batch['input_edges'][ix, :f['input_edges'].shape[0], :f['input_edges'].shape[1], :f['input_edges'].shape[2], :] = f['input_edges'] batch['out_degree'] = batch['in_degree'] sample = features[0]['labels'] if len(sample) == 1: if isinstance(sample[0], float): batch['labels'] = torch.from_numpy(np.concatenate([i['labels'] for i in features])) else: batch['labels'] = torch.from_numpy(np.concatenate([i['labels'] for i in features])) else: batch['labels'] = torch.from_numpy(np.stack([i['labels'] for i in features], axis=0)) return batch # File: transformers-main/src/transformers/models/deprecated/graphormer/configuration_graphormer.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class GraphormerConfig(PretrainedConfig): model_type = 'graphormer' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, num_classes: int=1, num_atoms: int=512 * 9, num_edges: int=512 * 3, num_in_degree: int=512, num_out_degree: int=512, num_spatial: int=512, num_edge_dis: int=128, multi_hop_max_dist: int=5, spatial_pos_max: int=1024, edge_type: str='multi_hop', max_nodes: int=512, share_input_output_embed: bool=False, num_hidden_layers: int=12, embedding_dim: int=768, ffn_embedding_dim: int=768, num_attention_heads: int=32, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, layerdrop: float=0.0, encoder_normalize_before: bool=False, pre_layernorm: bool=False, apply_graphormer_init: bool=False, activation_fn: str='gelu', embed_scale: float=None, freeze_embeddings: bool=False, num_trans_layers_to_freeze: int=0, traceable: bool=False, q_noise: float=0.0, qn_block_size: int=8, kdim: int=None, vdim: int=None, bias: bool=True, self_attention: bool=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs): self.num_classes = num_classes self.num_atoms = num_atoms self.num_in_degree = num_in_degree self.num_out_degree = num_out_degree self.num_edges = num_edges self.num_spatial = num_spatial self.num_edge_dis = num_edge_dis self.edge_type = edge_type self.multi_hop_max_dist = multi_hop_max_dist self.spatial_pos_max = spatial_pos_max self.max_nodes = max_nodes self.num_hidden_layers = num_hidden_layers self.embedding_dim = embedding_dim self.hidden_size = embedding_dim self.ffn_embedding_dim = ffn_embedding_dim self.num_attention_heads = num_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.layerdrop = layerdrop self.encoder_normalize_before = encoder_normalize_before self.pre_layernorm = pre_layernorm self.apply_graphormer_init = apply_graphormer_init self.activation_fn = activation_fn self.embed_scale = embed_scale self.freeze_embeddings = freeze_embeddings self.num_trans_layers_to_freeze = num_trans_layers_to_freeze self.share_input_output_embed = share_input_output_embed self.traceable = traceable self.q_noise = q_noise self.qn_block_size = qn_block_size self.kdim = kdim self.vdim = vdim self.self_attention = self_attention self.bias = bias 
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) # File: transformers-main/src/transformers/models/deprecated/graphormer/modeling_graphormer.py """""" import math from typing import Iterable, Iterator, List, Optional, Tuple, Union import torch import torch.nn as nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....activations import ACT2FN from ....modeling_outputs import BaseModelOutputWithNoAttention, SequenceClassifierOutput from ....modeling_utils import PreTrainedModel from ....utils import logging from .configuration_graphormer import GraphormerConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'graphormer-base-pcqm4mv1' _CONFIG_FOR_DOC = 'GraphormerConfig' def quant_noise(module: nn.Module, p: float, block_size: int): if p <= 0: return module if not isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)): raise NotImplementedError('Module unsupported for quant_noise.') is_conv = module.weight.ndim == 4 if not is_conv: if module.weight.size(1) % block_size != 0: raise AssertionError('Input features must be a multiple of block sizes') elif module.kernel_size == (1, 1): if module.in_channels % block_size != 0: raise AssertionError('Input channels must be a multiple of block sizes') else: k = module.kernel_size[0] * module.kernel_size[1] if k % block_size != 0: raise AssertionError('Kernel size must be a multiple of block size') def _forward_pre_hook(mod, input): if mod.training: if not is_conv: weight = mod.weight in_features = weight.size(1) out_features = weight.size(0) mask = torch.zeros(in_features // block_size * out_features, device=weight.device) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_features) else: weight = mod.weight in_channels = mod.in_channels out_channels = mod.out_channels if mod.kernel_size == (1, 1): mask = torch.zeros(int(in_channels // block_size * out_channels), device=weight.device) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels) else: mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device) mask.bernoulli_(p) mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1]) mask = mask.to(torch.bool) s = 1 / (1 - p) mod.weight.data = s * weight.masked_fill(mask, 0) module.register_forward_pre_hook(_forward_pre_hook) return module class LayerDropModuleList(nn.ModuleList): def __init__(self, p: float, modules: Optional[Iterable[nn.Module]]=None): super().__init__(modules) self.p = p def __iter__(self) -> Iterator[nn.Module]: dropout_probs = torch.empty(len(self)).uniform_() for (i, m) in enumerate(super().__iter__()): if not self.training or dropout_probs[i] > self.p: yield m class GraphormerGraphNodeFeature(nn.Module): def __init__(self, config: GraphormerConfig): super().__init__() self.num_heads = config.num_attention_heads self.num_atoms = config.num_atoms self.atom_encoder = nn.Embedding(config.num_atoms + 1, config.hidden_size, padding_idx=config.pad_token_id) self.in_degree_encoder = nn.Embedding(config.num_in_degree, config.hidden_size, padding_idx=config.pad_token_id) self.out_degree_encoder = nn.Embedding(config.num_out_degree, config.hidden_size, padding_idx=config.pad_token_id) self.graph_token = nn.Embedding(1, config.hidden_size) def forward(self, input_nodes: torch.LongTensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor) -> torch.Tensor: (n_graph, n_node) = input_nodes.size()[:2] node_feature = 
self.atom_encoder(input_nodes).sum(dim=-2) + self.in_degree_encoder(in_degree) + self.out_degree_encoder(out_degree) graph_token_feature = self.graph_token.weight.unsqueeze(0).repeat(n_graph, 1, 1) graph_node_feature = torch.cat([graph_token_feature, node_feature], dim=1) return graph_node_feature class GraphormerGraphAttnBias(nn.Module): def __init__(self, config: GraphormerConfig): super().__init__() self.num_heads = config.num_attention_heads self.multi_hop_max_dist = config.multi_hop_max_dist self.edge_encoder = nn.Embedding(config.num_edges + 1, config.num_attention_heads, padding_idx=0) self.edge_type = config.edge_type if self.edge_type == 'multi_hop': self.edge_dis_encoder = nn.Embedding(config.num_edge_dis * config.num_attention_heads * config.num_attention_heads, 1) self.spatial_pos_encoder = nn.Embedding(config.num_spatial, config.num_attention_heads, padding_idx=0) self.graph_token_virtual_distance = nn.Embedding(1, config.num_attention_heads) def forward(self, input_nodes: torch.LongTensor, attn_bias: torch.Tensor, spatial_pos: torch.LongTensor, input_edges: torch.LongTensor, attn_edge_type: torch.LongTensor) -> torch.Tensor: (n_graph, n_node) = input_nodes.size()[:2] graph_attn_bias = attn_bias.clone() graph_attn_bias = graph_attn_bias.unsqueeze(1).repeat(1, self.num_heads, 1, 1) spatial_pos_bias = self.spatial_pos_encoder(spatial_pos).permute(0, 3, 1, 2) graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + spatial_pos_bias t = self.graph_token_virtual_distance.weight.view(1, self.num_heads, 1) graph_attn_bias[:, :, 1:, 0] = graph_attn_bias[:, :, 1:, 0] + t graph_attn_bias[:, :, 0, :] = graph_attn_bias[:, :, 0, :] + t if self.edge_type == 'multi_hop': spatial_pos_ = spatial_pos.clone() spatial_pos_[spatial_pos_ == 0] = 1 spatial_pos_ = torch.where(spatial_pos_ > 1, spatial_pos_ - 1, spatial_pos_) if self.multi_hop_max_dist > 0: spatial_pos_ = spatial_pos_.clamp(0, self.multi_hop_max_dist) input_edges = input_edges[:, :, :, :self.multi_hop_max_dist, :] input_edges = self.edge_encoder(input_edges).mean(-2) max_dist = input_edges.size(-2) edge_input_flat = input_edges.permute(3, 0, 1, 2, 4).reshape(max_dist, -1, self.num_heads) edge_input_flat = torch.bmm(edge_input_flat, self.edge_dis_encoder.weight.reshape(-1, self.num_heads, self.num_heads)[:max_dist, :, :]) input_edges = edge_input_flat.reshape(max_dist, n_graph, n_node, n_node, self.num_heads).permute(1, 2, 3, 0, 4) input_edges = (input_edges.sum(-2) / spatial_pos_.float().unsqueeze(-1)).permute(0, 3, 1, 2) else: input_edges = self.edge_encoder(attn_edge_type).mean(-2).permute(0, 3, 1, 2) graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + input_edges graph_attn_bias = graph_attn_bias + attn_bias.unsqueeze(1) return graph_attn_bias class GraphormerMultiheadAttention(nn.Module): def __init__(self, config: GraphormerConfig): super().__init__() self.embedding_dim = config.embedding_dim self.kdim = config.kdim if config.kdim is not None else config.embedding_dim self.vdim = config.vdim if config.vdim is not None else config.embedding_dim self.qkv_same_dim = self.kdim == config.embedding_dim and self.vdim == config.embedding_dim self.num_heads = config.num_attention_heads self.attention_dropout_module = torch.nn.Dropout(p=config.attention_dropout, inplace=False) self.head_dim = config.embedding_dim // config.num_attention_heads if not self.head_dim * config.num_attention_heads == self.embedding_dim: raise AssertionError('The embedding_dim must be divisible by num_heads.') self.scaling = self.head_dim ** 
(-0.5) self.self_attention = True if not self.self_attention: raise NotImplementedError('The Graphormer model only supports self attention for now.') if self.self_attention and (not self.qkv_same_dim): raise AssertionError('Self-attention requires query, key and value to be of the same size.') self.k_proj = quant_noise(nn.Linear(self.kdim, config.embedding_dim, bias=config.bias), config.q_noise, config.qn_block_size) self.v_proj = quant_noise(nn.Linear(self.vdim, config.embedding_dim, bias=config.bias), config.q_noise, config.qn_block_size) self.q_proj = quant_noise(nn.Linear(config.embedding_dim, config.embedding_dim, bias=config.bias), config.q_noise, config.qn_block_size) self.out_proj = quant_noise(nn.Linear(config.embedding_dim, config.embedding_dim, bias=config.bias), config.q_noise, config.qn_block_size) self.onnx_trace = False def reset_parameters(self): if self.qkv_same_dim: nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) def forward(self, query: torch.LongTensor, key: Optional[torch.Tensor], value: Optional[torch.Tensor], attn_bias: Optional[torch.Tensor], key_padding_mask: Optional[torch.Tensor]=None, need_weights: bool=True, attn_mask: Optional[torch.Tensor]=None, before_softmax: bool=False, need_head_weights: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if need_head_weights: need_weights = True (tgt_len, bsz, embedding_dim) = query.size() src_len = tgt_len if not embedding_dim == self.embedding_dim: raise AssertionError(f'The query embedding dimension {embedding_dim} is not equal to the expected embedding_dim {self.embedding_dim}.') if not list(query.size()) == [tgt_len, bsz, embedding_dim]: raise AssertionError('Query size incorrect in Graphormer, compared to model dimensions.') if key is not None: (src_len, key_bsz, _) = key.size() if not torch.jit.is_scripting(): if key_bsz != bsz or value is None or (not (src_len, bsz == value.shape[:2])): raise AssertionError('The batch shape does not match the key or value shapes provided to the attention.') q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) q *= self.scaling q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) if k is not None: k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) if v is not None: v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) if k is None or not k.size(1) == src_len: raise AssertionError('The shape of the key generated in the attention is incorrect') if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: if key_padding_mask.size(0) != bsz or key_padding_mask.size(1) != src_len: raise AssertionError('The shape of the generated padding mask for the key does not match expected dimensions.') attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) if list(attn_weights.size()) != [bsz * self.num_heads, tgt_len, src_len]: raise AssertionError('The attention weights generated do not match the expected dimensions.') if attn_bias is not None: 
attn_weights += attn_bias.view(bsz * self.num_heads, tgt_len, src_len) if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) attn_weights += attn_mask if key_padding_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.masked_fill(key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float('-inf')) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return (attn_weights, v) attn_weights_float = torch.nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.attention_dropout_module(attn_weights) if v is None: raise AssertionError('No value generated') attn = torch.bmm(attn_probs, v) if list(attn.size()) != [bsz * self.num_heads, tgt_len, self.head_dim]: raise AssertionError('The attention generated do not match the expected dimensions.') attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embedding_dim) attn: torch.Tensor = self.out_proj(attn) attn_weights = None if need_weights: attn_weights = attn_weights_float.contiguous().view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0) if not need_head_weights: attn_weights = attn_weights.mean(dim=0) return (attn, attn_weights) def apply_sparse_mask(self, attn_weights: torch.Tensor, tgt_len: int, src_len: int, bsz: int) -> torch.Tensor: return attn_weights class GraphormerGraphEncoderLayer(nn.Module): def __init__(self, config: GraphormerConfig) -> None: super().__init__() self.embedding_dim = config.embedding_dim self.num_attention_heads = config.num_attention_heads self.q_noise = config.q_noise self.qn_block_size = config.qn_block_size self.pre_layernorm = config.pre_layernorm self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False) self.activation_dropout_module = torch.nn.Dropout(p=config.activation_dropout, inplace=False) self.activation_fn = ACT2FN[config.activation_fn] self.self_attn = GraphormerMultiheadAttention(config) self.self_attn_layer_norm = nn.LayerNorm(self.embedding_dim) self.fc1 = self.build_fc(self.embedding_dim, config.ffn_embedding_dim, q_noise=config.q_noise, qn_block_size=config.qn_block_size) self.fc2 = self.build_fc(config.ffn_embedding_dim, self.embedding_dim, q_noise=config.q_noise, qn_block_size=config.qn_block_size) self.final_layer_norm = nn.LayerNorm(self.embedding_dim) def build_fc(self, input_dim: int, output_dim: int, q_noise: float, qn_block_size: int) -> Union[nn.Module, nn.Linear, nn.Embedding, nn.Conv2d]: return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) def forward(self, input_nodes: torch.Tensor, self_attn_bias: Optional[torch.Tensor]=None, self_attn_mask: Optional[torch.Tensor]=None, self_attn_padding_mask: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: residual = input_nodes if self.pre_layernorm: input_nodes = self.self_attn_layer_norm(input_nodes) (input_nodes, attn) = self.self_attn(query=input_nodes, key=input_nodes, value=input_nodes, attn_bias=self_attn_bias, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask) input_nodes = self.dropout_module(input_nodes) input_nodes = residual + input_nodes if not self.pre_layernorm: input_nodes = self.self_attn_layer_norm(input_nodes) residual = input_nodes if self.pre_layernorm: input_nodes = self.final_layer_norm(input_nodes) input_nodes = self.activation_fn(self.fc1(input_nodes)) input_nodes = self.activation_dropout_module(input_nodes) input_nodes = 
self.fc2(input_nodes) input_nodes = self.dropout_module(input_nodes) input_nodes = residual + input_nodes if not self.pre_layernorm: input_nodes = self.final_layer_norm(input_nodes) return (input_nodes, attn) class GraphormerGraphEncoder(nn.Module): def __init__(self, config: GraphormerConfig): super().__init__() self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False) self.layerdrop = config.layerdrop self.embedding_dim = config.embedding_dim self.apply_graphormer_init = config.apply_graphormer_init self.traceable = config.traceable self.graph_node_feature = GraphormerGraphNodeFeature(config) self.graph_attn_bias = GraphormerGraphAttnBias(config) self.embed_scale = config.embed_scale if config.q_noise > 0: self.quant_noise = quant_noise(nn.Linear(self.embedding_dim, self.embedding_dim, bias=False), config.q_noise, config.qn_block_size) else: self.quant_noise = None if config.encoder_normalize_before: self.emb_layer_norm = nn.LayerNorm(self.embedding_dim) else: self.emb_layer_norm = None if config.pre_layernorm: self.final_layer_norm = nn.LayerNorm(self.embedding_dim) if self.layerdrop > 0.0: self.layers = LayerDropModuleList(p=self.layerdrop) else: self.layers = nn.ModuleList([]) self.layers.extend([GraphormerGraphEncoderLayer(config) for _ in range(config.num_hidden_layers)]) if config.freeze_embeddings: raise NotImplementedError('Freezing embeddings is not implemented yet.') for layer in range(config.num_trans_layers_to_freeze): m = self.layers[layer] if m is not None: for p in m.parameters(): p.requires_grad = False def forward(self, input_nodes: torch.LongTensor, input_edges: torch.LongTensor, attn_bias: torch.Tensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor, spatial_pos: torch.LongTensor, attn_edge_type: torch.LongTensor, perturb=None, last_state_only: bool=False, token_embeddings: Optional[torch.Tensor]=None, attn_mask: Optional[torch.Tensor]=None) -> Tuple[Union[torch.Tensor, List[torch.LongTensor]], torch.Tensor]: data_x = input_nodes (n_graph, n_node) = data_x.size()[:2] padding_mask = data_x[:, :, 0].eq(0) padding_mask_cls = torch.zeros(n_graph, 1, device=padding_mask.device, dtype=padding_mask.dtype) padding_mask = torch.cat((padding_mask_cls, padding_mask), dim=1) attn_bias = self.graph_attn_bias(input_nodes, attn_bias, spatial_pos, input_edges, attn_edge_type) if token_embeddings is not None: input_nodes = token_embeddings else: input_nodes = self.graph_node_feature(input_nodes, in_degree, out_degree) if perturb is not None: input_nodes[:, 1:, :] += perturb if self.embed_scale is not None: input_nodes = input_nodes * self.embed_scale if self.quant_noise is not None: input_nodes = self.quant_noise(input_nodes) if self.emb_layer_norm is not None: input_nodes = self.emb_layer_norm(input_nodes) input_nodes = self.dropout_module(input_nodes) input_nodes = input_nodes.transpose(0, 1) inner_states = [] if not last_state_only: inner_states.append(input_nodes) for layer in self.layers: (input_nodes, _) = layer(input_nodes, self_attn_padding_mask=padding_mask, self_attn_mask=attn_mask, self_attn_bias=attn_bias) if not last_state_only: inner_states.append(input_nodes) graph_rep = input_nodes[0, :, :] if last_state_only: inner_states = [input_nodes] if self.traceable: return (torch.stack(inner_states), graph_rep) else: return (inner_states, graph_rep) class GraphormerDecoderHead(nn.Module): def __init__(self, embedding_dim: int, num_classes: int): super().__init__() '' self.lm_output_learned_bias = nn.Parameter(torch.zeros(1)) self.classifier = 
nn.Linear(embedding_dim, num_classes, bias=False) self.num_classes = num_classes def forward(self, input_nodes: torch.Tensor, **unused) -> torch.Tensor: input_nodes = self.classifier(input_nodes) input_nodes = input_nodes + self.lm_output_learned_bias return input_nodes class GraphormerPreTrainedModel(PreTrainedModel): config_class = GraphormerConfig base_model_prefix = 'graphormer' main_input_name_nodes = 'input_nodes' main_input_name_edges = 'input_edges' def normal_(self, data: torch.Tensor): data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device)) def init_graphormer_params(self, module: Union[nn.Linear, nn.Embedding, GraphormerMultiheadAttention]): if isinstance(module, nn.Linear): self.normal_(module.weight.data) if module.bias is not None: module.bias.data.zero_() if isinstance(module, nn.Embedding): self.normal_(module.weight.data) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, GraphormerMultiheadAttention): self.normal_(module.q_proj.weight.data) self.normal_(module.k_proj.weight.data) self.normal_(module.v_proj.weight.data) def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.Embedding, nn.LayerNorm, GraphormerMultiheadAttention, GraphormerGraphEncoder]): if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=0.02) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=0.02) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, GraphormerMultiheadAttention): module.q_proj.weight.data.normal_(mean=0.0, std=0.02) module.k_proj.weight.data.normal_(mean=0.0, std=0.02) module.v_proj.weight.data.normal_(mean=0.0, std=0.02) module.reset_parameters() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, GraphormerGraphEncoder): if module.apply_graphormer_init: module.apply(self.init_graphormer_params) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class GraphormerModel(GraphormerPreTrainedModel): def __init__(self, config: GraphormerConfig): super().__init__(config) self.max_nodes = config.max_nodes self.graph_encoder = GraphormerGraphEncoder(config) self.share_input_output_embed = config.share_input_output_embed self.lm_output_learned_bias = None self.load_softmax = not getattr(config, 'remove_head', False) self.lm_head_transform_weight = nn.Linear(config.embedding_dim, config.embedding_dim) self.activation_fn = ACT2FN[config.activation_fn] self.layer_norm = nn.LayerNorm(config.embedding_dim) self.post_init() def reset_output_layer_parameters(self): self.lm_output_learned_bias = nn.Parameter(torch.zeros(1)) def forward(self, input_nodes: torch.LongTensor, input_edges: torch.LongTensor, attn_bias: torch.Tensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor, spatial_pos: torch.LongTensor, attn_edge_type: torch.LongTensor, perturb: Optional[torch.FloatTensor]=None, masked_tokens: None=None, return_dict: Optional[bool]=None, **unused) -> Union[Tuple[torch.LongTensor], BaseModelOutputWithNoAttention]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict (inner_states, graph_rep) = self.graph_encoder(input_nodes, input_edges, attn_bias, in_degree, out_degree, spatial_pos, attn_edge_type, perturb=perturb) input_nodes = inner_states[-1].transpose(0, 1) if masked_tokens is not None: raise 
NotImplementedError input_nodes = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(input_nodes))) if self.share_input_output_embed and hasattr(self.graph_encoder.embed_tokens, 'weight'): input_nodes = torch.nn.functional.linear(input_nodes, self.graph_encoder.embed_tokens.weight) if not return_dict: return tuple((x for x in [input_nodes, inner_states] if x is not None)) return BaseModelOutputWithNoAttention(last_hidden_state=input_nodes, hidden_states=inner_states) def max_nodes(self): return self.max_nodes class GraphormerForGraphClassification(GraphormerPreTrainedModel): def __init__(self, config: GraphormerConfig): super().__init__(config) self.encoder = GraphormerModel(config) self.embedding_dim = config.embedding_dim self.num_classes = config.num_classes self.classifier = GraphormerDecoderHead(self.embedding_dim, self.num_classes) self.is_encoder_decoder = True self.post_init() def forward(self, input_nodes: torch.LongTensor, input_edges: torch.LongTensor, attn_bias: torch.Tensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor, spatial_pos: torch.LongTensor, attn_edge_type: torch.LongTensor, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, **unused) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder(input_nodes, input_edges, attn_bias, in_degree, out_degree, spatial_pos, attn_edge_type, return_dict=True) (outputs, hidden_states) = (encoder_outputs['last_hidden_state'], encoder_outputs['hidden_states']) head_outputs = self.classifier(outputs) logits = head_outputs[:, 0, :].contiguous() loss = None if labels is not None: mask = ~torch.isnan(labels) if self.num_classes == 1: loss_fct = MSELoss() loss = loss_fct(logits[mask].squeeze(), labels[mask].squeeze().float()) elif self.num_classes > 1 and len(labels.shape) == 1: loss_fct = CrossEntropyLoss() loss = loss_fct(logits[mask].view(-1, self.num_classes), labels[mask].view(-1)) else: loss_fct = BCEWithLogitsLoss(reduction='sum') loss = loss_fct(logits[mask], labels[mask]) if not return_dict: return tuple((x for x in [loss, logits, hidden_states] if x is not None)) return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=hidden_states, attentions=None) # File: transformers-main/src/transformers/models/deprecated/jukebox/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_jukebox': ['JukeboxConfig', 'JukeboxPriorConfig', 'JukeboxVQVAEConfig'], 'tokenization_jukebox': ['JukeboxTokenizer']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_jukebox'] = ['JukeboxModel', 'JukeboxPreTrainedModel', 'JukeboxVQVAE', 'JukeboxPrior'] if TYPE_CHECKING: from .configuration_jukebox import JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/jukebox/configuration_jukebox.py """""" import os from typing 
import List, Union from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) _LARGE_ATTENTION = ['block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'cross_attention', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'cross_attention', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'cross_attention', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'cross_attention', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'cross_attention', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'cross_attention', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'block_attn', 'transpose_block_attn', 'prev_block_attn', 'cross_attention'] _RawColumnPreviousRowAttention = ['block_attn', 'transpose_block_attn', 'prev_block_attn'] _FullDenseAttention = ['dense_attention'] _PrimePrimeDenseAttention = ['prime_attn', 'prime_attn', 'dense_attn'] def full_dense_attention(layer): return _FullDenseAttention[0] def raw_column_previous_row_attention(layer): return _RawColumnPreviousRowAttention[layer % 3] def large_separated_enc_dec_w_lyrics(layer): return _LARGE_ATTENTION[layer % 79] def enc_dec_with_lyrics(layer): if layer % 16 == 15: return _PrimePrimeDenseAttention[layer % 3] return _RawColumnPreviousRowAttention[layer % 3] ATTENTION_PATTERNS = {'full_dense_attention': full_dense_attention, 'raw_column_previous_row_attention': raw_column_previous_row_attention, 'large_separated_enc_dec_w_lyrics': large_separated_enc_dec_w_lyrics, 'enc_dec_with_lyrics': enc_dec_with_lyrics} class JukeboxPriorConfig(PretrainedConfig): model_type = 'jukebox_prior' attribute_map = {'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head'} def __init__(self, act_fn='quick_gelu', level=0, alignment_head=2, alignment_layer=68, attention_multiplier=0.25, attention_pattern='enc_dec_with_lyrics', attn_dropout=0, attn_res_scale=False, blocks=64, conv_res_scale=None, num_layers=72, emb_dropout=0, encoder_config=None, encoder_loss_fraction=0.4, hidden_size=2048, init_scale=0.2, is_encoder_decoder=True, lyric_vocab_size=80, mask=False, max_duration=600, max_nb_genres=1, merged_decoder=True, metadata_conditioning=True, metadata_dims=[604, 7898], min_duration=0, mlp_multiplier=1.0, music_vocab_size=2048, n_ctx=6144, n_heads=2, nb_relevant_lyric_tokens=384, res_conv_depth=3, res_conv_width=128, res_convolution_multiplier=1, res_dilation_cycle=None, res_dilation_growth_rate=1, res_downs_t=[3, 2, 2], res_strides_t=[2, 2, 2], resid_dropout=0, sampling_rate=44100, spread=None, timing_dims=64, zero_out=False, **kwargs): 
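# Each JukeboxPriorConfig describes a single prior level; JukeboxConfig (further down in this
# file) keeps a list of them in `prior_configs`. When `encoder_config` is given as a plain dict
# it is wrapped into a nested JukeboxPriorConfig below, used for the lyric encoder.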
self.act_fn = act_fn self.alignment_head = alignment_head self.alignment_layer = alignment_layer self.attention_multiplier = attention_multiplier self.attention_pattern = attention_pattern self.attn_dropout = attn_dropout self.attn_res_scale = attn_res_scale self.blocks = blocks self.conv_res_scale = conv_res_scale self.num_layers = num_layers self.emb_dropout = emb_dropout self.music_vocab_size = music_vocab_size if encoder_config is not None: self.encoder_config = JukeboxPriorConfig(**encoder_config) else: self.encoder_config = None self.encoder_loss_fraction = encoder_loss_fraction self.init_scale = init_scale self.is_encoder_decoder = is_encoder_decoder self.lyric_vocab_size = lyric_vocab_size self.level = level self.mask = mask self.max_duration = max_duration self.max_nb_genres = max_nb_genres self.merged_decoder = merged_decoder self.metadata_conditioning = metadata_conditioning self.metadata_dims = metadata_dims self.min_duration = min_duration self.mlp_multiplier = mlp_multiplier self.n_ctx = n_ctx self.n_heads = n_heads self.nb_relevant_lyric_tokens = nb_relevant_lyric_tokens self.res_conv_depth = res_conv_depth self.res_conv_width = res_conv_width self.res_convolution_multiplier = res_convolution_multiplier self.res_dilation_cycle = res_dilation_cycle self.res_dilation_growth_rate = res_dilation_growth_rate self.res_downs_t = res_downs_t self.res_strides_t = res_strides_t self.resid_dropout = resid_dropout self.sampling_rate = sampling_rate self.spread = spread self.timing_dims = timing_dims self.hidden_size = hidden_size self.zero_out = zero_out @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], level=0, **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'jukebox': config_dict = config_dict[f'prior_{level}'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class JukeboxVQVAEConfig(PretrainedConfig): model_type = 'jukebox_vqvae' def __init__(self, act_fn='relu', nb_discrete_codes=2048, commit=0.02, conv_input_shape=1, conv_res_scale=False, embed_dim=64, hop_fraction=[0.125, 0.5, 0.5], levels=3, lmu=0.99, multipliers=[2, 1, 1], res_conv_depth=4, res_conv_width=32, res_convolution_multiplier=1, res_dilation_cycle=None, res_dilation_growth_rate=3, res_downs_t=[3, 2, 2], res_strides_t=[2, 2, 2], sample_length=1058304, init_scale=0.2, zero_out=False, **kwargs): self.hop_fraction = hop_fraction self.conv_input_shape = conv_input_shape self.sample_length = sample_length self.levels = levels self.embed_dim = embed_dim self.nb_discrete_codes = nb_discrete_codes self.res_conv_width = res_conv_width self.res_conv_depth = res_conv_depth self.res_convolution_multiplier = res_convolution_multiplier self.res_dilation_growth_rate = res_dilation_growth_rate self.res_dilation_cycle = res_dilation_cycle self.multipliers = multipliers self.res_downs_t = res_downs_t self.res_strides_t = res_strides_t self.lmu = lmu self.commit = commit self.conv_res_scale = conv_res_scale self.act_fn = act_fn self.init_scale = init_scale self.zero_out = zero_out @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'jukebox': config_dict = config_dict['vqvae_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class JukeboxConfig(PretrainedConfig): model_type = 'jukebox' def __init__(self, vqvae_config=None, prior_config_list=None, nb_priors=3, sampling_rate=44100, timing_dims=64, min_duration=0, max_duration=600.0, max_nb_genres=5, metadata_conditioning=True, **kwargs): if vqvae_config is None: vqvae_config = {} logger.info('vqvae_config is None. initializing the JukeboxVQVAE with default values.') self.vqvae_config = JukeboxVQVAEConfig(**vqvae_config) if prior_config_list is not None: self.prior_configs = [JukeboxPriorConfig(**prior_config) for prior_config in prior_config_list] else: self.prior_configs = [] for prior_idx in range(nb_priors): prior_config = kwargs.pop(f'prior_{prior_idx}', None) if prior_config is None: prior_config = {} logger.info(f"prior_{prior_idx}'s config is None. 
Initializing the JukeboxPriorConfig list with default values.") self.prior_configs.append(JukeboxPriorConfig(**prior_config)) self.hop_fraction = self.vqvae_config.hop_fraction self.nb_priors = nb_priors self.max_nb_genres = max_nb_genres self.sampling_rate = sampling_rate self.timing_dims = timing_dims self.min_duration = min_duration self.max_duration = max_duration self.metadata_conditioning = metadata_conditioning super().__init__(**kwargs) @classmethod def from_configs(cls, prior_configs: List[JukeboxPriorConfig], vqvae_config: JukeboxVQVAEConfig, **kwargs): prior_config_list = [config.to_dict() for config in prior_configs] return cls(prior_config_list=prior_config_list, vqvae_config_dict=vqvae_config.to_dict(), **kwargs) def to_dict(self): result = super().to_dict() result['prior_config_list'] = [config.to_dict() for config in result.pop('prior_configs')] return result # File: transformers-main/src/transformers/models/deprecated/jukebox/convert_jukebox.py """""" import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/' MODEL_MAPPING = {'jukebox-1b-lyrics': ['5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '1b_lyrics/prior_level_2.pth.tar'], 'jukebox-5b-lyrics': ['5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '5b_lyrics/prior_level_2.pth.tar']} def replace_key(key): if key.endswith('.model.1.bias') and len(key.split('.')) > 10: key = key.replace('.model.1.bias', '.conv1d_1.bias') elif key.endswith('.model.1.weight') and len(key.split('.')) > 10: key = key.replace('.model.1.weight', '.conv1d_1.weight') elif key.endswith('.model.3.bias') and len(key.split('.')) > 10: key = key.replace('.model.3.bias', '.conv1d_2.bias') elif key.endswith('.model.3.weight') and len(key.split('.')) > 10: key = key.replace('.model.3.weight', '.conv1d_2.weight') if 'conditioner_blocks.0.' in key: key = key.replace('conditioner_blocks.0', 'conditioner_blocks') if 'prime_prior' in key: key = key.replace('prime_prior', 'encoder') if '.emb.' in key and 'total' not in key and ('absolute' not in key) and ('relative' not in key): key = key.replace('.emb.', '.') if key.endswith('k'): return key.replace('.k', '.codebook') if 'y_emb.' in key: return key.replace('y_emb.', 'metadata_embedding.') if 'x_emb.emb.' 
in key: key = key.replace('0.x_emb.emb', 'embed_tokens') if 'prime_state_ln' in key: return key.replace('prime_state_ln', 'encoder.final_layer_norm') if '.ln' in key: return key.replace('.ln', '.layer_norm') if '_ln' in key: return key.replace('_ln', '_layer_norm') if 'prime_state_proj' in key: return key.replace('prime_state_proj', 'encoder.proj_in') if 'prime_x_out' in key: return key.replace('prime_x_out', 'encoder.lm_head') if 'prior.x_out' in key: return key.replace('x_out', 'fc_proj_out') if 'x_emb' in key: return key.replace('x_emb', 'embed_tokens') return key def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping): new_dict = {} import re re_encoder_block_conv_in = re.compile('encoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(\\d).(bias|weight)') re_encoder_block_resnet = re.compile('encoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(\\d).model.(\\d*).model.(\\d*).(bias|weight)') re_encoder_block_proj_out = re.compile('encoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(bias|weight)') re_decoder_block_conv_out = re.compile('decoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(\\d).(bias|weight)') re_decoder_block_resnet = re.compile('decoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(\\d).model.(\\d*).model.(\\d*).(bias|weight)') re_decoder_block_proj_in = re.compile('decoders.(\\d*).level_blocks.(\\d*).model.(\\d*).(bias|weight)') re_prior_cond_conv_out = re.compile('conditioner_blocks.(\\d*).cond.model.(\\d*).(\\d).(bias|weight)') re_prior_cond_resnet = re.compile('conditioner_blocks.(\\d*).cond.model.(\\d*).(\\d).model.(\\d*).model.(\\d*).(bias|weight)') re_prior_cond_proj_in = re.compile('conditioner_blocks.(\\d*).cond.model.(\\d*).(bias|weight)') for (original_key, value) in state_dict.items(): if re_encoder_block_conv_in.fullmatch(original_key): regex_match = re_encoder_block_conv_in.match(original_key) groups = regex_match.groups() block_index = int(groups[2]) * 2 + int(groups[3]) re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}' key = re_encoder_block_conv_in.sub(re_new_key, original_key) elif re_encoder_block_resnet.fullmatch(original_key): regex_match = re_encoder_block_resnet.match(original_key) groups = regex_match.groups() block_index = int(groups[2]) * 2 + int(groups[3]) conv_index = {'1': 1, '3': 2}[groups[-2]] prefix = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.' 
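# Original checkpoint keys of the form
#   encoders.<e>.level_blocks.<l>.model.<i>.<j>.model.<r>.model.<c>.(weight|bias)
# are re-rooted under the converted layout: the flat indices <i>, <j> are folded into
# block_index = i * 2 + j, and the inner conv position <c> is mapped via {'1': conv1d_1, '3': conv1d_2}.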
resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' re_new_key = prefix + resnet_block key = re_encoder_block_resnet.sub(re_new_key, original_key) elif re_encoder_block_proj_out.fullmatch(original_key): regex_match = re_encoder_block_proj_out.match(original_key) groups = regex_match.groups() re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}' key = re_encoder_block_proj_out.sub(re_new_key, original_key) elif re_decoder_block_conv_out.fullmatch(original_key): regex_match = re_decoder_block_conv_out.match(original_key) groups = regex_match.groups() block_index = int(groups[2]) * 2 + int(groups[3]) - 2 re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}' key = re_decoder_block_conv_out.sub(re_new_key, original_key) elif re_decoder_block_resnet.fullmatch(original_key): regex_match = re_decoder_block_resnet.match(original_key) groups = regex_match.groups() block_index = int(groups[2]) * 2 + int(groups[3]) - 2 conv_index = {'1': 1, '3': 2}[groups[-2]] prefix = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.' resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' re_new_key = prefix + resnet_block key = re_decoder_block_resnet.sub(re_new_key, original_key) elif re_decoder_block_proj_in.fullmatch(original_key): regex_match = re_decoder_block_proj_in.match(original_key) groups = regex_match.groups() re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}' key = re_decoder_block_proj_in.sub(re_new_key, original_key) elif re_prior_cond_conv_out.fullmatch(original_key): regex_match = re_prior_cond_conv_out.match(original_key) groups = regex_match.groups() block_index = int(groups[1]) * 2 + int(groups[2]) - 2 re_new_key = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}' key = re_prior_cond_conv_out.sub(re_new_key, original_key) elif re_prior_cond_resnet.fullmatch(original_key): regex_match = re_prior_cond_resnet.match(original_key) groups = regex_match.groups() block_index = int(groups[1]) * 2 + int(groups[2]) - 2 conv_index = {'1': 1, '3': 2}[groups[-2]] prefix = f'conditioner_blocks.upsampler.upsample_block.{block_index}.' 
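# The conditioner upsampler resnet keys get the same re-rooting as the decoder branch above;
# the block index carries the extra `- 2` offset, presumably because the first entries of the
# original `cond.model` correspond to layers handled separately (e.g. the projection mapped to
# `proj_in`).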
resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' re_new_key = prefix + resnet_block key = re_prior_cond_resnet.sub(re_new_key, original_key) elif re_prior_cond_proj_in.fullmatch(original_key): regex_match = re_prior_cond_proj_in.match(original_key) groups = regex_match.groups() re_new_key = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}' key = re_prior_cond_proj_in.sub(re_new_key, original_key) else: key = original_key key = replace_key(key) if f'{key_prefix}.{key}' not in model_state_dict or key is None: print(f'failed converting {original_key} to {key}, does not match') elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape: val = model_state_dict[f'{key_prefix}.{key}'] print(f'{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match') key = original_key mapping[key] = original_key new_dict[key] = value return new_dict @torch.no_grad() def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None): for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"): r = requests.get(f'{PREFIX}{file}', allow_redirects=True) os.makedirs(f'{pytorch_dump_folder_path}/', exist_ok=True) open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", 'wb').write(r.content) model_to_convert = MODEL_MAPPING[model_name.split('/')[-1]] config = JukeboxConfig.from_pretrained(model_name) model = JukeboxModel(config) weight_dict = [] mapping = {} for (i, dict_name) in enumerate(model_to_convert): old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")['model'] new_dic = {} for k in old_dic.keys(): if k.endswith('.b'): new_dic[k.replace('b', 'bias')] = old_dic[k] elif k.endswith('.w'): new_dic[k.replace('w', 'weight')] = old_dic[k] elif 'level_2' not in dict_name and 'cond.model.' 
in k: new_dic[k.replace('.blocks.', '.model.')] = old_dic[k] else: new_dic[k] = old_dic[k] key_prefix = 'vqvae' if i == 0 else f'priors.{3 - i}' new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping) weight_dict.append(new_dic) vqvae_state_dict = weight_dict.pop(0) model.vqvae.load_state_dict(vqvae_state_dict) for i in range(len(weight_dict)): model.priors[i].load_state_dict(weight_dict[2 - i]) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) with open(f'{pytorch_dump_folder_path}/mapping.json', 'w') as txtfile: json.dump(mapping, txtfile) print(f'Saving model {model_name} to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) return weight_dict if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='jukebox-5b-lyrics', type=str, help="Name of the model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default='jukebox-5b-lyrics-converted', type=str, help='Path to the output PyTorch model directory.') args = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path) # File: transformers-main/src/transformers/models/deprecated/jukebox/modeling_jukebox.py """""" import math import os from typing import List, Optional, Tuple import numpy as np import torch import torch.nn.functional as F from torch import nn from torch.nn import LayerNorm as FusedLayerNorm from ....activations import ACT2FN from ....modeling_utils import PreTrainedModel from ....utils import add_start_docstrings, logging from ....utils.logging import tqdm from .configuration_jukebox import ATTENTION_PATTERNS, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig logger = logging.get_logger(__name__) def filter_logits(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')): logits = logits.clone() top_k = min(top_k, logits.size(-1)) if top_k > 0: indices_to_remove = logits < torch.topk(logits, top_k, dim=-1)[0][..., -1:] logits[indices_to_remove] = filter_value if top_p > 0.0: (sorted_logits, sorted_indices) = torch.sort(logits, descending=True, dim=-1) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cumulative_probs > top_p sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 indices_to_remove = torch.zeros_like(logits, dtype=torch.bool).scatter_(dim=-1, index=sorted_indices, src=sorted_indices_to_remove) logits[indices_to_remove] = filter_value return logits def get_relevant_lyric_tokens(full_tokens, max_n_lyric_tokens, total_length, offset, duration): full_tokens = full_tokens[0] if len(full_tokens) < max_n_lyric_tokens: tokens = torch.cat([torch.zeros(max_n_lyric_tokens - len(full_tokens), dtype=torch.long).to(full_tokens.device), full_tokens]) indices = [-1] * (max_n_lyric_tokens - len(full_tokens)) + list(range(0, len(full_tokens))) else: midpoint = int(len(full_tokens) * (offset + duration / 2.0) / total_length) midpoint = min(max(midpoint, max_n_lyric_tokens // 2), len(full_tokens) - max_n_lyric_tokens // 2) tokens = full_tokens[midpoint - max_n_lyric_tokens // 2:midpoint + max_n_lyric_tokens // 2] indices = list(range(midpoint - max_n_lyric_tokens // 2, midpoint + max_n_lyric_tokens // 2)) return (tokens.unsqueeze(dim=0), indices) def get_starts(total_length, n_ctx, hop_length): starts = [] for start in range(0, total_length - n_ctx + hop_length, hop_length): if start + n_ctx >= total_length: start = total_length - n_ctx starts.append(start) 
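# get_starts returns window offsets spaced `hop_length` apart, clamping the last window so that
# start + n_ctx never runs past total_length. Small worked example:
#   total_length=10, n_ctx=4, hop_length=4 -> candidates 0, 4, 8; 8 is clamped to 10 - 4 = 6,
#   giving [0, 4, 6].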
return starts def get_alignment(music_tokens, labels, prior, config): level = prior.levels - 1 n_ctx = prior.n_ctx tokens = music_tokens[level] (batch_size, total_length) = (tokens.shape[0], tokens.shape[1]) if total_length < n_ctx: padding_length = n_ctx - total_length tokens = torch.cat([tokens, torch.zeros(batch_size, n_ctx - total_length, dtype=tokens.dtype, device=tokens.device)], dim=1) total_length = tokens.shape[1] else: padding_length = 0 hop_length = int(config.hop_fraction[-level - 1] * prior.n_ctx) (alignment_head, alignment_layer) = (config.prior_alignment_head[0], config.prior_alignment_layer[0]) attn_layers = {alignment_layer} alignment_hops = {} indices_hops = {} for start in tqdm(get_starts(total_length, n_ctx, hop_length), desc='Computing lyric to music alignment '): end = start + n_ctx (metadata, indices_hop) = prior.get_metadata(labels, start, config.sample_length, get_indices=True, offset=0) tokens_bs = torch.chunk(tokens, batch_size, dim=0) metadata_bs = torch.chunk(metadata, batch_size, dim=0) w_hops = [] for (tokens_i, metadata_i) in zip(tokens_bs, metadata_bs): w_hop = prior.forward_tokens(tokens_i[:, start:end], [], metadata_i, get_attn_weights=attn_layers) w_hops.append(w_hop[0][:, alignment_head]) del w_hop weights = torch.cat(w_hops, dim=0) del w_hops alignment_hop = weights.float().cpu().numpy() del weights indices_hops[start] = indices_hop alignment_hops[start] = alignment_hop alignments = [] for item in range(batch_size): full_tokens = labels[0, 3:] alignment = np.zeros((total_length, len(full_tokens) + 1)) for start in reversed(get_starts(total_length, n_ctx, hop_length)): end = start + n_ctx alignment_hop = alignment_hops[start][item] indices = indices_hops[start][item] alignment[start:end, indices] = alignment_hop alignment = alignment[:total_length - padding_length, :-1] alignments.append(alignment) return alignments def save_temp_audio(fname, lvl, metas, aud): aud = torch.clamp(aud, -1, 1).cpu().numpy() for i in list(range(aud.shape[0])): if metas is not None: (artists, genres, lyrics) = list(metas)[i].values() path = f'{fname}/lvl_{lvl}-{artists}-{genres}-{lyrics[:5]}-{i}' np.save(path, aud[i]) else: np.save(f'{fname}/lvl_{lvl}-sample-{i}', aud[i]) def get_mask(mask, query_length, key_value_length, blocks, spread, device, sample, sample_t): if mask is None or query_length == 1: return None offset = sample_t - query_length if sample else max(key_value_length - query_length, 0) if mask == 'autoregressive': mask = torch.ones(query_length, key_value_length, device=device).tril(offset) elif mask == 'summary': mask = torch.ones(query_length, query_length, device=device).tril() mask = torch.ones(query_length, query_length, device=device).tril() mask = mask.view(query_length, blocks, query_length // blocks)[:, :-1, -key_value_length // blocks:] mask = torch.nn.functional.pad(mask, (0, 0, 1, 0), value=1).contiguous().view(query_length, key_value_length) elif mask == 'prime': mask = torch.ones(query_length, key_value_length, device=device).tril(offset) return mask.view(1, 1, query_length, key_value_length) class JukeboxConv1D(nn.Module): def __init__(self, input_width, output_width): super().__init__() self.input_width = input_width self.output_width = output_width weight = torch.empty(input_width, output_width) bias = torch.zeros(output_width) self.weight = nn.Parameter(weight) self.bias = nn.Parameter(bias) def forward(self, hidden_states): size_out = (*hidden_states.size()[:-1], self.output_width) hidden_states = torch.addmm(self.bias.type_as(hidden_states), 
hidden_states.view(-1, hidden_states.size(-1)), self.weight.type_as(hidden_states)) hidden_states = hidden_states.view(*size_out) return hidden_states class JukeboxResConv1DBlock(nn.Module): def __init__(self, config, conv_width, depth=1, res_scale=1.0): super().__init__() hidden_dim = config.res_convolution_multiplier * conv_width dilation = config.res_dilation_growth_rate ** depth padding = dilation self.res_scale = res_scale self.activation = nn.ReLU() self.conv1d_1 = nn.Conv1d(conv_width, hidden_dim, 3, 1, padding, dilation) self.conv1d_2 = nn.Conv1d(hidden_dim, conv_width, 1, 1, 0) def forward(self, hidden_states): residuals = hidden_states hidden_states = self.activation(hidden_states) hidden_states = self.conv1d_1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.conv1d_2(hidden_states) return residuals + self.res_scale * hidden_states class JukeboxResnet1D(nn.Module): def __init__(self, config, conv_width, n_depth, reverse_dilation=False): super().__init__() self.dilation_cycle = config.res_dilation_cycle res_scale = 1.0 if not config.conv_res_scale else 1.0 / math.sqrt(n_depth) blocks = [] for depth in range(n_depth): block_depth = depth if self.dilation_cycle is None else depth % self.dilation_cycle blocks.append(JukeboxResConv1DBlock(config, conv_width, block_depth, res_scale)) if reverse_dilation: blocks = blocks[::-1] self.resnet_block = nn.ModuleList(blocks) def forward(self, hidden_states): for block in self.resnet_block: hidden_states = block(hidden_states) return hidden_states class JukeboxEncoderConvBlock(nn.Module): def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t): super().__init__() blocks = [] filter_t = stride_t * 2 pad_t = stride_t // 2 if down_t > 0: for i in range(down_t): blocks.append(nn.Conv1d(embed_dim if i == 0 else hidden_dim, hidden_dim, filter_t, stride_t, pad_t)) blocks.append(JukeboxResnet1D(config, hidden_dim, depth)) self.proj_out = nn.Conv1d(hidden_dim, config.embed_dim, 3, 1, 1) self.downsample_block = nn.ModuleList(blocks) def forward(self, hidden_states): for block in self.downsample_block: hidden_states = block(hidden_states) hidden_states = self.proj_out(hidden_states) return hidden_states class JukeboxEncoder(nn.Module): def __init__(self, config, width, depth, levels, downs_t, strides_t): super().__init__() self.levels = levels self.level_blocks = nn.ModuleList() iterator = zip(list(range(self.levels)), downs_t, strides_t) for (i, down_t, stride_t) in iterator: self.level_blocks.append(JukeboxEncoderConvBlock(config, config.conv_input_shape if i == 0 else config.embed_dim, width, depth, down_t, stride_t)) def forward(self, hidden_states): all_hidden_states = [] for level in range(self.levels): level_block = self.level_blocks[level] hidden_states = level_block(hidden_states) all_hidden_states.append(hidden_states) return all_hidden_states class JukeboxDecoderConvBock(nn.Module): def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t, reverse_dilation=True): self.embed_dim = embed_dim self.hidden_dim = hidden_dim super().__init__() blocks = [] if down_t > 0: filter_t = stride_t * 2 pad_t = stride_t // 2 self.proj_in = nn.Conv1d(embed_dim, hidden_dim, 3, 1, 1) for i in range(down_t): blocks.append(JukeboxResnet1D(config, hidden_dim, depth, reverse_dilation)) blocks.append(nn.ConvTranspose1d(hidden_dim, hidden_dim if i < down_t - 1 else embed_dim, filter_t, stride_t, pad_t)) self.upsample_block = nn.ModuleList(blocks) def forward(self, hidden_states): hidden_states = 
self.proj_in(hidden_states) for block in self.upsample_block: hidden_states = block(hidden_states) return hidden_states class JukeboxDecoder(nn.Module): def __init__(self, config, hidden_dim, depth, levels, downs_t, strides_t): super().__init__() self.levels = levels self.level_blocks = nn.ModuleList() for (level, down_t, stride_t) in zip(list(range(self.levels)), downs_t, strides_t): self.level_blocks.append(JukeboxDecoderConvBock(config, config.embed_dim, hidden_dim, depth, down_t, stride_t)) self.out = nn.Conv1d(config.embed_dim, config.conv_input_shape, 3, 1, 1) def forward(self, hidden_states, all_levels=True): hidden_state = hidden_states[-1] for level in reversed(range(self.levels)): level_block = self.level_blocks[level] hidden_state = level_block(hidden_state) if level != 0 and all_levels: hidden_state = hidden_state + hidden_states[level - 1] hidden_state = self.out(hidden_state) return hidden_state class JukeboxBottleneckBlock(nn.Module): def __init__(self, config: JukeboxVQVAEConfig): super().__init__() self.nb_discrete_codes = config.nb_discrete_codes self.codebook_width = config.embed_dim self.mu = config.lmu self.threshold = 1.0 self.init = False self.codebook_sum = None self.codebook_elem = None self.register_buffer('codebook', torch.zeros(self.nb_discrete_codes, self.codebook_width)) def _tile(self, hidden_states): (dim, embed_width) = hidden_states.shape if dim < self.nb_discrete_codes: n_repeats = (self.nb_discrete_codes + dim - 1) // dim std = 0.01 / np.sqrt(embed_width) hidden_states = hidden_states.repeat(n_repeats, 1) hidden_states = hidden_states + torch.randn_like(hidden_states) * std return hidden_states def init_codebook(self, hidden_states): nb_discrete_codes = self.nb_discrete_codes self.init = True codes = self._tile(hidden_states) self.codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes] self.codebook_sum = self.codebook self.codebook_elem = torch.ones(nb_discrete_codes, device=self.codebook.device) def update_codebook(self, hidden_states, latent_states): (mu, codebook_width, nb_discrete_codes) = (self.mu, self.codebook_width, self.nb_discrete_codes) with torch.no_grad(): latent_states_onehot = torch.zeros(nb_discrete_codes, hidden_states.shape[0], device=hidden_states.device) latent_states_onehot.scatter_(0, latent_states.view(1, hidden_states.shape[0]), 1) _codebook_sum = torch.matmul(latent_states_onehot, hidden_states) _codebook_elem = latent_states_onehot.sum(dim=-1) codes = self._tile(hidden_states) _random_codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes] old_codebook = self.codebook self.codebook_sum = mu * self.codebook_sum + (1.0 - mu) * _codebook_sum self.codebook_elem = mu * self.codebook_elem + (1.0 - mu) * _codebook_elem usage = (self.codebook_elem.view(nb_discrete_codes, 1) >= self.threshold).float() norm_code = self.codebook_sum.view(nb_discrete_codes, codebook_width) / self.codebook_elem.view(nb_discrete_codes, 1) self.codebook = usage * norm_code + (1 - usage) * _random_codebook _codebook_prob = _codebook_elem / torch.sum(_codebook_elem) entropy = -torch.sum(_codebook_prob * torch.log(_codebook_prob + 1e-08)) used_curr = (_codebook_elem >= self.threshold).sum() usage = torch.sum(usage) dk = torch.norm(self.codebook - old_codebook) / np.sqrt(np.prod(old_codebook.shape)) return {'entropy': entropy, 'used_curr': used_curr, 'usage': usage, 'dk': dk} def preprocess(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 1).contiguous() hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) if 
hidden_states.shape[-1] == self.codebook_width: prenorm = torch.norm(hidden_states - torch.mean(hidden_states)) / np.sqrt(np.prod(hidden_states.shape)) elif hidden_states.shape[-1] == 2 * self.codebook_width: (x1, x2) = (hidden_states[..., :self.codebook_width], hidden_states[..., self.codebook_width:]) prenorm = torch.norm(x1 - torch.mean(x1)) / np.sqrt(np.prod(x1.shape)) + torch.norm(x2 - torch.mean(x2)) / np.sqrt(np.prod(x2.shape)) hidden_states = x1 + x2 return (hidden_states, prenorm) def postprocess(self, latent_states, dequantised_states, x_shape): (batch_size, time) = x_shape dequantised_states = dequantised_states.view(batch_size, time, -1).permute(0, 2, 1).contiguous() latent_states = latent_states.view(batch_size, time) return (latent_states, dequantised_states) def quantise(self, latent_states): codebook_weights = self.codebook.t() distance = torch.sum(latent_states ** 2, dim=-1, keepdim=True) - 2 * torch.matmul(latent_states, codebook_weights) + torch.sum(codebook_weights ** 2, dim=0, keepdim=True) (min_distance, music_tokens) = torch.min(distance, dim=-1) fit = torch.mean(min_distance) return (music_tokens, fit) def dequantise(self, music_tokens): dequantised_states = F.embedding(music_tokens, self.codebook) return dequantised_states def encode(self, latent_states): (samples, _, seq_len) = latent_states.shape (latent_states, _) = self.preprocess(latent_states) (music_tokens, _) = self.quantise(latent_states) music_tokens = music_tokens.view(samples, seq_len) return music_tokens def decode(self, music_tokens): (samples, seq_len) = music_tokens.shape dequantised_states = self.dequantise(music_tokens) dequantised_states = dequantised_states.view(samples, seq_len, self.codebook_width).permute(0, 2, 1).contiguous() return dequantised_states def forward(self, hidden_states, update_codebook=True): (samples, _, seq_len) = hidden_states.shape (hidden_states, prenorm) = self.preprocess(hidden_states) if update_codebook and (not self.init): self.init_codebook(hidden_states) (music_tokens, fit) = self.quantise(hidden_states) dequantised_states = self.dequantise(music_tokens) if update_codebook: update_metrics = self.update_codebook(hidden_states, music_tokens) else: update_metrics = {} commit_loss = torch.norm(dequantised_states.detach() - hidden_states) ** 2 / np.prod(hidden_states.shape) dequantised_states = hidden_states + (dequantised_states - hidden_states).detach() (music_tokens, dequantised_states) = self.postprocess(music_tokens, dequantised_states, (samples, seq_len)) return (music_tokens, dequantised_states, commit_loss, dict(fit=fit, pn=prenorm, **update_metrics)) class JukeboxBottleneck(nn.Module): def __init__(self, config, levels): super().__init__() self.levels = levels self.level_blocks = nn.ModuleList() for level in range(self.levels): self.level_blocks.append(JukeboxBottleneckBlock(config)) def encode(self, raw_audio): music_tokens = [level_block.encode(hidden_states) for (level_block, hidden_states) in zip(self.level_blocks, raw_audio)] return music_tokens def decode(self, music_tokens, start_level=0, end_level=None): if end_level is None: end_level = self.levels quantised_audio = [level_block.decode(z) for (level_block, z) in zip(self.level_blocks[start_level:end_level], music_tokens)] return quantised_audio def forward(self, input_audio): (music_tokens, quantised_states, commit_losses, metrics) = ([], [], [], []) for level in range(self.levels): level_block = self.level_blocks[-level - 1] hidden_states = input_audio[level] (sampled_tokens, quantised_state, 
commit_loss, metric) = level_block(hidden_states, update_codebook=self.training) music_tokens.append(sampled_tokens) if not self.training: quantised_state = quantised_state.detach() quantised_states.append(quantised_state) commit_losses.append(commit_loss) if self.training: metrics.append(metric) return (music_tokens, quantised_states, commit_losses, metrics) JUKEBOX_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config (`JukeboxConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('The Hierarchical VQ-VAE model used in Jukebox. This model follows the Hierarchical VQVAE paper from [Will Williams, Sam\nRinger, Tom Ash, John Hughes, David MacLeod, Jamie Dougherty](https://arxiv.org/abs/2002.08111).\n\n ', JUKEBOX_START_DOCSTRING) class JukeboxVQVAE(PreTrainedModel): config_class = JukeboxVQVAEConfig base_model_prefix = 'vqvae' def _init_weights(self, module): if isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale) elif isinstance(module, JukeboxConv1D): if self.config.zero_out: module.weight.data.zero_() else: module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale) elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out: module.conv1d_2.weight.data.zero_() module.conv1d_2.bias.data.zero_() if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def __init__(self, config: JukeboxVQVAEConfig): super().__init__(config) downs_t = config.res_downs_t strides_t = config.res_strides_t if not config.sample_length: downsamples = [stride ** down for (stride, down) in zip(strides_t, downs_t)] top_raw_to_tokens = np.prod(downsamples) config.sample_length = config.sample_length_in_seconds * config.sampling_rate // top_raw_to_tokens * top_raw_to_tokens config.sample_length = config.sample_length.astype(int) self.nb_discrete_codes = config.nb_discrete_codes self.commit = config.commit self.sample_length = config.sample_length self.downsamples = [stride ** down for (stride, down) in zip(strides_t, downs_t)] self.hop_lengths = np.cumprod(self.downsamples) self.levels = levels = config.levels self.music_tokens_shapes = [int(self.sample_length // self.hop_lengths[-level - 1]) for level in range(levels)] self.multipliers = config.multipliers if config.multipliers is not None else [1] * levels self.encoders = nn.ModuleList() self.decoders = nn.ModuleList() for level in range(levels): width = config.res_conv_width * self.multipliers[level] depth = config.res_conv_depth * self.multipliers[level] self.encoders.append(JukeboxEncoder(config, width, depth, level + 1, downs_t[:level + 1], strides_t[:level + 1])) self.decoders.append(JukeboxDecoder(config, width, depth, level + 1, downs_t[:level + 1], 
strides_t[:level + 1])) self.bottleneck = JukeboxBottleneck(config, levels) def _decode(self, music_tokens, start_level=0, end_level=None): if end_level is None: end_level = self.levels latent_states = self.bottleneck.decode(music_tokens, start_level=start_level, end_level=end_level) (decoder, dequantised_state) = (self.decoders[start_level], latent_states[0:1]) dequantised_state = decoder(dequantised_state, all_levels=False) dequantised_state = dequantised_state.permute(0, 2, 1) return dequantised_state def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1) -> torch.Tensor: token_chunks = [torch.chunk(token, bs_chunks, dim=0) for token in music_tokens] dequantised_states = [] for i in range(bs_chunks): music_tokens_i = [chunks[i] for chunks in token_chunks] dequantised_state = self._decode(music_tokens_i, start_level=start_level, end_level=end_level) dequantised_states.append(dequantised_state) return torch.cat(dequantised_states, dim=0) def _encode(self, raw_audio, start_level=0, end_level=None): if end_level is None: end_level = self.levels input_audio = raw_audio.permute(0, 2, 1).float() latent_states = [] for level in range(self.levels): encoder = self.encoders[level] latent_state = encoder(input_audio) latent_states.append(latent_state[-1]) music_tokens = self.bottleneck.encode(latent_states) return music_tokens[start_level:end_level] def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1): audio_chunks = torch.chunk(input_audio, bs_chunks, dim=0) music_tokens_list = [] for chunk_i in audio_chunks: music_tokens_i = self._encode(chunk_i, start_level=start_level, end_level=end_level) music_tokens_list.append(music_tokens_i) music_tokens = [torch.cat(music_tokens_level, dim=0) for music_tokens_level in zip(*music_tokens_list)] return music_tokens def sample(self, n_samples): music_tokens = [torch.randint(0, self.nb_discrete_codes, size=(n_samples, *music_tokens_shape), device='cpu') for music_tokens_shape in self.music_tokens_shapes] return self.decode(music_tokens) def forward(self, raw_audio: torch.FloatTensor) -> Tuple[torch.Tensor, torch.Tensor]: input_audio = raw_audio.permute(0, 2, 1).float() latent_states = [] for level in range(self.levels): encoder = self.encoders[level] latent_state = encoder(input_audio) latent_states.append(latent_state[-1]) (_, music_tokens, commit_losses, _) = self.bottleneck(latent_states) dequantised_states = [] for level in range(self.levels): decoder = self.decoders[level] dequantised_state = decoder(music_tokens[level:level + 1], all_levels=False) dequantised_states.append(dequantised_state.permute(0, 2, 1)) commit_loss = sum(commit_losses) loss = self.commit * commit_loss return (dequantised_states, loss) class JukeboxMLP(nn.Module): def __init__(self, config): super().__init__() embed_dim = config.hidden_size hidden_dim = int(config.mlp_multiplier * embed_dim) self.c_fc = JukeboxConv1D(embed_dim, hidden_dim) self.c_proj = JukeboxConv1D(hidden_dim, embed_dim) self.act = ACT2FN[config.act_fn] self.dropout = nn.Dropout(config.resid_dropout) def forward(self, hidden_states): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class JukeboxLayerNorm(FusedLayerNorm): def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True): super().__init__(normalized_shape, eps=eps, elementwise_affine=elementwise_affine) self.width = np.prod(normalized_shape) self.max_numel = 65535 * 
self.width def forward(self, input): if input.numel() > self.max_numel: return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps).type_as(input) else: return super().forward(input).type_as(input) class JukeboxAttention(nn.Module): def __init__(self, config, n_ctx, attn_func='dense_attn'): super().__init__() self.embed_dim = config.hidden_size self.n_heads = config.n_heads self.dropout = config.attn_dropout hidden_dim = int(config.attention_multiplier * self.embed_dim) self.head_dim = hidden_dim // config.n_heads self.n_ctx = n_ctx self.hidden_dim = hidden_dim self.scale = self.head_dim ** (-0.25) self.mask = config.mask if attn_func == 'cross_attention': self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim) self.c_enc_kv = JukeboxConv1D(self.embed_dim, hidden_dim * 2) else: self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim * 3) self.c_proj = JukeboxConv1D(hidden_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_dropout) self.resid_dropout = nn.Dropout(config.resid_dropout) self.attn_func = attn_func if attn_func == 'cross_attention': self.qkv = self.decode_qkv elif attn_func == 'prime_attn': self.qkv = self.prime_qkv else: self.qkv = self.factored_qkv ATTENTION_MAP = {'dense_attn': (self.dense_attn, 'autoregressive'), 'block_attn': (self.block_attn, 'autoregressive'), 'transpose_block_attn': (self.transpose_block_attn, 'autoregressive'), 'prev_block_attn': (self.prev_block_attn, None), 'summary_attn': (self.summary_attn, 'summary'), 'summary_spread_attn': (self.summary_spread_attn, 'summary'), 'cross_attention': (self.dense_attn, None), 'prime_attn': (self.prime_attn, 'prime')} (self.attn, self.attn_mask) = ATTENTION_MAP[attn_func] self.blocks = config.blocks self.spread = config.spread if self.blocks is not None: self.block_ctx = self.n_ctx // self.blocks self.sample_t = 0 self.cache = {} self.encoder_len = config.nb_relevant_lyric_tokens self.record_attn = False def _attn(self, query_states, key_states, value_states, sample): scale = self.scale if self.training: attention_weight = torch.matmul(query_states * scale, key_states * scale) else: attention_weight = torch.matmul(query_states, key_states) attention_weight.mul_(scale * scale) attn_weight_type = attention_weight.dtype attention_weight = attention_weight.float() if self.mask: mask = get_mask(self.attn_mask, query_states.size(-2), key_states.size(-1), self.blocks, self.spread, attention_weight.device, sample, self.sample_t) if mask is not None: attention_weight = attention_weight * mask + -1000000000.0 * (1 - mask) attention_prob = F.softmax(attention_weight, dim=-1).type(attn_weight_type) if self.record_attn: self.attention_prob = attention_prob if self.attn_func == 'prime_attn': self.attention_prob = self.attention_prob[:, :, self.encoder_len:, :self.encoder_len] attention_prob = self.attn_dropout(attention_prob) context_states = torch.matmul(attention_prob, value_states) return context_states def merge_heads(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous() new_hidden_states_shape = (*hidden_states.size()[:-2], hidden_states.size(-2) * hidden_states.size(-1)) return hidden_states.view(*new_hidden_states_shape) def split_heads(self, hidden_states, is_key=False): new_hidden_states_shape = (*hidden_states.size()[:-1], self.n_heads, hidden_states.size(-1) // self.n_heads) hidden_states = hidden_states.view(*new_hidden_states_shape) if is_key: return hidden_states.permute(0, 2, 3, 1) else: return hidden_states.permute(0, 2, 1, 3) def dense_attn(self, 
query, key, value, sample): query = self.split_heads(query) key = self.split_heads(key, is_key=True) value = self.split_heads(value) context_states = self._attn(query, key, value, sample) context_states = self.merge_heads(context_states) return context_states def block_attn(self, query, key, value, sample): block_ctx = self.block_ctx (batch_size, seq_len, embed_dim) = value.shape if sample: return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) else: query_length = query.shape[1] query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim) if query_length < seq_len: seq_len = query_length key = key[:, -seq_len:].contiguous() value = value[:, -seq_len:].contiguous() key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def transpose_block_attn(self, query, key, value, sample): block_ctx = self.block_ctx (batch_size, seq_len, embed_dim) = value.shape if sample: block_len = (seq_len - 1) % block_ctx key = key[:, block_len::block_ctx, :] value = value[:, block_len::block_ctx, :] return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) else: query_length = query.shape[1] query = query.view(batch_size, query_length // block_ctx, block_ctx, embed_dim) query = query.transpose(1, 2).contiguous() query = query.view(batch_size * block_ctx, query_length // block_ctx, embed_dim) key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim) key = key.transpose(1, 2).contiguous() key = key.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim) value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim) value = value.transpose(1, 2).contiguous() value = value.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim) block_attn = self.dense_attn(query, key, value, sample) block_attn = block_attn.view(batch_size, block_ctx, query_length // block_ctx, embed_dim) block_attn = block_attn.transpose(1, 2).contiguous() block_attn = block_attn.view(batch_size, query_length, embed_dim) return block_attn def prev_block_attn(self, query, key, value, sample): block_ctx = self.block_ctx (batch_size, seq_len, embed_dim) = value.shape if sample: block = (seq_len - 1) // block_ctx prev_l = (block - 1) * block_ctx if block > 0: key = key[:, prev_l:prev_l + block_ctx, :] value = value[:, prev_l:prev_l + block_ctx, :] else: key = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype) value = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype) return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) else: query_length = query.shape[1] query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim) key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :] key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0)) key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :] value = torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0)) value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) if query_length < seq_len: nb_query_blocks = query_length // block_ctx nb_key_blocks = seq_len // block_ctx seq_len = query_length key = key.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:] key 
= key.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim) value = value.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:] value = value.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim) return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def summary_attn(self, query, key, value, sample): blocks = self.blocks block_ctx = self.block_ctx (batch_size, seq_len, embed_dim) = value.shape if sample: key = key[:, block_ctx - 1:blocks * block_ctx - 1:block_ctx, :] key = torch.nn.functional.pad(key, (0, 0, 1, 0)) value = value[:, block_ctx - 1:blocks * block_ctx - 1:block_ctx, :] value = torch.nn.functional.pad(value, (0, 0, 1, 0)) return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) else: key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :] key = torch.nn.functional.pad(key, (0, 0, 1, 0)) value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :] value = torch.nn.functional.pad(value, (0, 0, 1, 0)) return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def summary_spread_attn(self, query, key, value, sample): blocks = self.blocks spread = self.spread (batch_size, seq_len, embed_dim) = value.shape if sample: raise NotImplementedError else: key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :] key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0)).contiguous() key = key.view(batch_size, blocks * spread, embed_dim) value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :] value = torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0)).contiguous() value = value.view(batch_size, blocks * spread, embed_dim) return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def prime_attn(self, query, key, value, sample): encoder_len = self._encoder_len key = key[:, :encoder_len] value = value[:, :encoder_len] return self.dense_attn(query, key, value, sample) def factored_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] if last_encoder_hidden_states is not None: raise TypeError('last_encoder_hidden_states should be None') (query, key, value) = hidden_states.chunk(3, dim=2) if sample: self.sample_t += curr_ctx (key, value) = self._append_cache(key, value) l_cache = self._suff_cache_len() if self._cache_len() > l_cache: self._slice_cache(-l_cache) if curr_ctx > 1: if self.attn_func != 'dense_attn': query = self._pad_to_block_ctx(query, query=True) key = self._pad_to_block_ctx(key) value = self._pad_to_block_ctx(value) sample = False else: key = self.cache['key'] value = self.cache['value'] return (query, key, value, sample) def prime_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] if last_encoder_hidden_states is not None: raise TypeError('last_encoder_hidden_states should be None') (query, key, value) = hidden_states.chunk(3, dim=2) if sample: if self._cache_len() < self._encoder_len: self._append_cache(key, value) if self._cache_len() > self._encoder_len: self._slice_cache(0, self._encoder_len) (key, value) = (self.cache['key'], self.cache['value']) self.sample_t += curr_ctx return (query, key, value, sample) def decode_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] query = hidden_states if sample: if self.sample_t == 0: (self.cache['key'], 
self.cache['value']) = self.c_enc_kv(last_encoder_hidden_states.type_as(hidden_states)).chunk(2, dim=2) (key, value) = (self.cache['key'], self.cache['value']) self.sample_t += curr_ctx else: (key, value) = self.c_enc_kv(last_encoder_hidden_states.type_as(hidden_states)).chunk(2, dim=2) return (query, key, value, sample) def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] hidden_states = self.c_attn(hidden_states) (query, key, value, sample) = self.qkv(hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample) attention_scores = self.attn(query, key, value, sample) if attention_scores.shape[1] != curr_ctx: offset = self._offset(curr_ctx) attention_scores = attention_scores[:, offset:offset + curr_ctx, :].contiguous() attention_scores = self.c_proj(attention_scores) return self.resid_dropout(attention_scores) @property def _encoder_len(self): encoder_len = self.encoder_len encoder_blocks = encoder_len // self.blocks + 1 return encoder_blocks * self.blocks def _offset(self, curr_ctx): if self.attn_func == 'dense_attn': return 0 return (self.sample_t - curr_ctx) % self.block_ctx def _pad_to_block_ctx(self, hidden_states, query=False): seq_len = hidden_states.shape[1] offset = self._offset(seq_len) if query else 0 n_blocks = (seq_len + offset + self.block_ctx - 1) // self.block_ctx pad = n_blocks * self.block_ctx - seq_len - offset if pad == 0 and offset == 0: return hidden_states else: return F.pad(hidden_states, (0, 0, offset, pad)) def _cache_len(self): return 0 if 'key' not in self.cache else self.cache['key'].shape[1] def _suff_cache_len(self): previous_block_length = (self.sample_t - 1) % self.block_ctx + 1 + self.block_ctx REQUIRED_CACHE_LEN = {'dense_attn': self.sample_t, 'block_attn': (self.sample_t - 1) % self.block_ctx + 1, 'transpose_block_attn': self.sample_t, 'prev_block_attn': self.sample_t if self.sample_t <= self.block_ctx else previous_block_length, 'cross_attn': self.encoder_len, 'prime_attn': min(self.sample_t, self._encoder_len)} return REQUIRED_CACHE_LEN[self.attn_func] def _slice_cache(self, start, end=None): self.cache['key'] = self.cache['key'][:, start:end] self.cache['value'] = self.cache['value'][:, start:end] def _append_cache(self, key, value): if 'key' not in self.cache: self.cache['key'] = key self.cache['value'] = value else: (old_key, old_value) = (key, value) key = torch.cat([self.cache['key'], old_key], dim=1) value = torch.cat([self.cache['value'], old_value], dim=1) del self.cache['key'] del self.cache['value'] del old_key del old_value self.cache['key'] = key self.cache['value'] = value return (self.cache['key'], self.cache['value']) def del_cache(self): self.sample_t = 0 if 'key' in self.cache: del self.cache['key'] if 'value' in self.cache: del self.cache['value'] self.cache = {} class JukeboxBlock(nn.Module): def __init__(self, config, n_ctx, attn_func='dense_attn'): super().__init__() self.width = config.hidden_size self.attn = JukeboxAttention(config, n_ctx, attn_func=attn_func) self.layer_norm_0 = JukeboxLayerNorm(config.hidden_size) self.mlp = JukeboxMLP(config) self.layer_norm_1 = JukeboxLayerNorm(config.hidden_size) self.res_scale = 1.0 / config.num_layers if config.attn_res_scale else 1.0 self.attn_func = attn_func def forward(self, hidden_states, last_encoder_hidden_states, sample=False): residuals = hidden_states hidden_states = self.layer_norm_0(hidden_states) hidden_states = self.attn(hidden_states, last_encoder_hidden_states, sample) output_states = 
self.layer_norm_1(residuals + hidden_states) output_states = self.mlp(output_states) if self.res_scale == 1.0: output = residuals + hidden_states + output_states else: output = residuals + self.res_scale * (hidden_states + output_states) return output class JukeboxLayerStack(nn.Module): def __init__(self, config, n_ctx): super().__init__() self.n_ctx = n_ctx self.width = config.hidden_size self.num_layers = config.num_layers self.blocks = config.blocks self.attention_pattern = config.attention_pattern if self.blocks is not None: self.block_ctx = n_ctx // self.blocks self.encoder_len = config.nb_relevant_lyric_tokens self.n_heads = config.n_heads attention_pattern = ATTENTION_PATTERNS[self.attention_pattern] self._attn_mods = nn.ModuleList() for depth in range(self.num_layers): self._attn_mods.append(JukeboxBlock(config, n_ctx, attn_func=attention_pattern(depth))) self.saved_attn_weights = [] def set_record_attn(self, record_attn): def _should_record_attn(layer_idx): if isinstance(record_attn, bool): return record_attn return layer_idx in record_attn for (i, layer) in enumerate(self._attn_mods): layer.attn.record_attn = _should_record_attn(i) if not record_attn: self.saved_attn_weights = [] def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False): for (i, attn_layer) in enumerate(self._attn_mods): if attn_layer.attn_func == 'cross_attention': hidden_states = attn_layer(hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample) else: hidden_states = attn_layer(hidden_states, last_encoder_hidden_states=None, sample=sample) if attn_layer.attn.record_attn: self.saved_attn_weights.append(attn_layer.attn.c_attn.weight) return hidden_states def del_cache(self): for attn_layer in self._attn_mods: attn_layer.attn.del_cache() class JukeboxPositionalEmbedding(nn.Module): def __init__(self, embed_dim, width): super().__init__() self.pos_emb = nn.Parameter(torch.empty((embed_dim, width))) def forward(self): pos_emb = self.pos_emb return pos_emb class JukeboxConditionalAutoregressive(nn.Module): def __init__(self, config, n_ctx=None, embed_dim=None, audio_conditioning=False, metadata_conditioning=False, is_encoder=False): super().__init__() self.width = config.hidden_size self.num_layers = config.num_layers self.n_ctx = n_ctx if n_ctx is not None else config.n_ctx self.embed_dim = embed_dim if embed_dim is not None else config.music_vocab_size self.embed_tokens = nn.Embedding(self.embed_dim, config.hidden_size) self.embed_tokens_dropout = nn.Dropout(config.emb_dropout) self.metadata_conditioning = metadata_conditioning self.audio_conditioning = audio_conditioning if not metadata_conditioning: self.start_token = nn.Parameter(torch.empty((1, config.hidden_size))) self.pos_emb = JukeboxPositionalEmbedding(self.n_ctx, config.hidden_size) self.pos_emb_dropout = nn.Dropout(config.emb_dropout) self.transformer = JukeboxLayerStack(config, n_ctx=self.n_ctx) self.is_encoder = is_encoder self.encoder_len = config.nb_relevant_lyric_tokens if config.merged_decoder: self.add_cond_after_transformer = False self.share_embed_tokens_fc_proj_out = False else: self.add_cond_after_transformer = True self.share_embed_tokens_fc_proj_out = True if not is_encoder: self.fc_proj_out = nn.Linear(config.hidden_size, self.embed_dim, bias=False) if self.share_embed_tokens_fc_proj_out: self.fc_proj_out.weight = self.embed_tokens.weight self.loss = torch.nn.CrossEntropyLoss() def forward(self, tokens, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, 
get_preds=False, get_acts=False, get_sep_loss=False): batch_size = tokens.shape[0] with torch.no_grad(): tokens = tokens.view(batch_size, -1).long() if not self.audio_conditioning: audio_conditioning = torch.zeros((batch_size, 1, self.width), device=tokens.device, dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype) target = tokens hidden_states = self.embed_tokens(tokens) hidden_states = torch.cat((hidden_states[:, -1:], hidden_states[:, :-1]), dim=1) if self.metadata_conditioning: hidden_states[:, 0] = metadata_conditioning.view(batch_size, self.width) else: hidden_states[:, 0] = self.start_token hidden_states = self.embed_tokens_dropout(hidden_states) + self.pos_emb_dropout(self.pos_emb()) + audio_conditioning hidden_states = self.transformer(hidden_states, last_encoder_hidden_states=last_encoder_hidden_states) if self.add_cond_after_transformer: hidden_states = hidden_states + audio_conditioning activations = hidden_states if self.is_encoder: return hidden_states hidden_states = self.fc_proj_out(hidden_states) loss_fn = nn.CrossEntropyLoss() if get_sep_loss: lyric_hidden_states = hidden_states[:, :self.encoder_len].reshape(-1, self.embed_dim) token_hidden_states = hidden_states[:, self.encoder_len:].reshape(-1, self.embed_dim) lyric_loss = loss_fn(lyric_hidden_states, target[:, :self.encoder_len].reshape(-1)) / np.log(2.0) music_token_loss = loss_fn(token_hidden_states, target[:, self.encoder_len:].reshape(-1)) / np.log(2.0) loss = (lyric_loss, music_token_loss) else: loss = loss_fn(hidden_states.view(-1, self.embed_dim), target.view(-1)) / np.log(2.0) if get_preds: return (loss, hidden_states) elif get_acts: return (loss, activations) else: return (loss, None) def get_emb(self, sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning): if sample_t == 0: hidden_states = torch.empty(n_samples, 1, self.width, dtype=self.embed_tokens.weight.dtype).to(self.embed_tokens.weight.device) if self.metadata_conditioning: hidden_states[:, 0] = metadata_conditioning.view(n_samples, self.width) else: hidden_states[:, 0] = self.start_token else: hidden_states = self.embed_tokens(tokens) if audio_conditioning.shape == (n_samples, self.n_ctx, self.width): cond = audio_conditioning[:, sample_t:sample_t + 1, :] else: cond = audio_conditioning hidden_states = hidden_states + self.pos_emb()[sample_t:sample_t + 1] + cond return (hidden_states, cond) def sample(self, n_samples, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, temp=1.0, top_k=0, top_p=0.0, get_preds=False, sample_tokens=None): if sample_tokens is None: sample_tokens = self.n_ctx if not self.audio_conditioning: audio_conditioning = torch.zeros((n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype).to(self.fc_proj_out.device) with torch.no_grad(): sampled_tokens = [] tokens = None if get_preds: preds = [] iter = tqdm(range(0, sample_tokens), leave=False) for sample_t in iter: iter.set_description(f'Ancestral sampling {sample_tokens} music tokens', refresh=True) (hidden_states, cond) = self.get_emb(sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning) hidden_states = self.transformer(hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True) if self.add_cond_after_transformer: hidden_states = hidden_states + cond hidden_states = self.fc_proj_out(hidden_states) if get_preds: preds.append(hidden_states.clone()) hidden_states = hidden_states / temp hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p) 
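# The two statements just above implement the per-step sampling rule: logits are divided by the temperature,
# truncated by `filter_logits` (defined elsewhere in this file), and the next token is then drawn from a
# categorical distribution (next statement). The helper below is an illustrative, self-contained sketch of
# standard top-k / nucleus (top-p) filtering under common-sense assumptions; it is not guaranteed to match
# `filter_logits` line for line.
import torch

def _topk_topp_sketch(logits, top_k=0, top_p=0.0, filter_value=-float("inf")):
    # logits: (..., vocab_size); returns logits with improbable entries masked to `filter_value`.
    if top_k > 0:
        # Keep only the k largest logits per position.
        kth_best = torch.topk(logits, top_k, dim=-1).values[..., -1, None]
        logits = logits.masked_fill(logits < kth_best, filter_value)
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
        cumulative_probs = torch.softmax(sorted_logits, dim=-1).cumsum(dim=-1)
        # Drop tokens once the cumulative probability exceeds top_p, always keeping the best token.
        sorted_mask = cumulative_probs > top_p
        sorted_mask[..., 1:] = sorted_mask[..., :-1].clone()
        sorted_mask[..., 0] = False
        mask = sorted_mask.scatter(-1, sorted_indices, sorted_mask)
        logits = logits.masked_fill(mask, filter_value)
    return logits

# e.g. probs = torch.softmax(_topk_topp_sketch(torch.randn(2, 10), top_k=4, top_p=0.9), dim=-1)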
tokens = torch.distributions.Categorical(logits=hidden_states).sample() sampled_tokens.append(tokens.clone()) del tokens self.transformer.del_cache() tokens = torch.cat(sampled_tokens, dim=1) if get_preds: preds = torch.cat(preds, dim=1) if get_preds: return (tokens, preds) else: return tokens def split_chunks(self, length, chunk_size): n_passes = (length + chunk_size - 1) // chunk_size chunk_sizes = [*[chunk_size] * (n_passes - 1), (length - 1) % chunk_size + 1] return chunk_sizes def primed_sample(self, n_samples, lyric_and_music_tokens, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, temp=1.0, top_k=0, top_p=0.0, get_preds=False, chunk_size=None, sample_tokens=None): if sample_tokens is None: sample_tokens = self.n_ctx batch_size = lyric_and_music_tokens.shape[0] with torch.no_grad(): lyric_and_music_tokens = lyric_and_music_tokens.view(batch_size, -1).long() sampled_audio = torch.split(lyric_and_music_tokens, 1, dim=1) sampled_audio = list(sampled_audio) if not self.audio_conditioning: audio_conditioning = torch.zeros((n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype).to(lyric_and_music_tokens.device) with torch.no_grad(): if get_preds: preds = [] if chunk_size is None: chunk_size = len(sampled_audio) chunk_sizes = self.split_chunks(len(sampled_audio), chunk_size) x_primes = [] start = 0 token = None for current_chunk_size in tqdm(chunk_sizes, desc='Preparing past key value', leave=False): (sampled_audio_prime, conds_prime) = ([], []) for sample_t in range(start, start + current_chunk_size): (x_prime, cond_prime) = self.get_emb(sample_t, n_samples, token, audio_conditioning, metadata_conditioning) token = sampled_audio[sample_t] sampled_audio_prime.append(x_prime) conds_prime.append(cond_prime) start = start + current_chunk_size (x_prime, cond_prime) = (torch.cat(sampled_audio_prime, dim=1), torch.cat(conds_prime, dim=1)) del sampled_audio_prime del conds_prime if not get_preds: del cond_prime x_prime = self.transformer(x_prime, last_encoder_hidden_states=last_encoder_hidden_states, sample=True) if get_preds: if self.add_cond_after_transformer: x_prime = x_prime + cond_prime del cond_prime x_primes.append(x_prime) else: del x_prime if get_preds: x_prime = torch.cat(x_primes, dim=1) x_prime = self.fc_proj_out(x_prime) preds.append(x_prime) input_tokens = sampled_audio[-1] itererator = tqdm(range(len(sampled_audio), sample_tokens), desc=f'Sampling {len(range(len(sampled_audio), sample_tokens))} music tokens', leave=False) for sample_t in itererator: (hidden_states, cond) = self.get_emb(sample_t, n_samples, input_tokens, audio_conditioning, metadata_conditioning) hidden_states = self.transformer(hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True) if self.add_cond_after_transformer: hidden_states = hidden_states + cond hidden_states = self.fc_proj_out(hidden_states) if get_preds: preds.append(hidden_states) hidden_states = hidden_states / temp hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p) music_tokens = torch.distributions.Categorical(logits=hidden_states).sample() sampled_audio.append(music_tokens.clone()) input_tokens = music_tokens del input_tokens, music_tokens self.transformer.del_cache() music_tokens = torch.cat(sampled_audio, dim=1) if get_preds: preds = torch.cat(preds, dim=1) if get_preds: return (music_tokens, preds) else: return music_tokens class JukeboxMusicTokenConditioner(nn.Module): def __init__(self, config, level): super().__init__() 
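# JukeboxMusicTokenConditioner turns the coarser level's discrete music tokens into a dense conditioning
# signal for the finer-level prior: tokens are embedded, permuted to (batch, channels, time), upsampled by
# the JukeboxDecoderConvBock so the time axis matches the finer level, permuted back, and layer-normalised.
# Below is a minimal, self-contained sketch of that shape bookkeeping with made-up sizes (vocab 2048,
# hidden 32, 4x upsampling); it only illustrates the idea and does not reuse the real building blocks.
import torch
from torch import nn

class _UpsamplingConditionerSketch(nn.Module):
    def __init__(self, vocab_size=2048, hidden_size=32, stride=4):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden_size)
        # A single transposed conv stands in for the stack of residual upsampling blocks.
        self.upsample = nn.ConvTranspose1d(hidden_size, hidden_size, kernel_size=stride, stride=stride)
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, music_tokens):            # (batch, coarse_len) int64 tokens
        hidden = self.embed(music_tokens)       # (batch, coarse_len, hidden)
        hidden = hidden.permute(0, 2, 1)        # (batch, hidden, coarse_len)
        hidden = self.upsample(hidden)          # (batch, hidden, coarse_len * stride)
        hidden = hidden.permute(0, 2, 1)        # (batch, coarse_len * stride, hidden)
        return self.norm(hidden)

# e.g. _UpsamplingConditionerSketch()(torch.randint(0, 2048, (1, 16))).shape == (1, 64, 32)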
self.embed_tokens = nn.Embedding(config.music_vocab_size, config.hidden_size) config.embed_dim = config.music_vocab_size self.upsampler = JukeboxDecoderConvBock(config, config.hidden_size, config.res_conv_width, config.res_conv_depth, config.res_downs_t[level], config.res_strides_t[level], reverse_dilation=False) self.layer_norm = JukeboxLayerNorm(config.hidden_size) def forward(self, music_tokens, raw_audio_conditioning=None): if raw_audio_conditioning is None: raw_audio_conditioning = 0.0 music_tokens = music_tokens.long() hidden_states = self.embed_tokens(music_tokens) hidden_states = hidden_states + raw_audio_conditioning hidden_states = hidden_states.permute(0, 2, 1) hidden_states = self.upsampler(hidden_states) hidden_states = hidden_states.permute(0, 2, 1) hidden_states = self.layer_norm(hidden_states) return hidden_states class JukeboxRangeEmbedding(nn.Module): def __init__(self, n_time, embed_dim, range, out_width, clamp=False): super().__init__() self.n_time = n_time self.embed_dim = embed_dim self.emb = nn.Embedding(embed_dim, out_width) (self.pos_min, self.pos_max) = range self.clamp = clamp def forward(self, pos_start, pos_end=None): if not len(pos_start.shape) == 2: raise TypeError(f'Expected shape with 2 dims, got {pos_start.shape}') if not ((self.pos_min <= pos_start).all() and (pos_start < self.pos_max).all()): raise TypeError(f'Range is [{self.pos_min},{self.pos_max}), got {pos_start}') pos_start = pos_start.float() if pos_end is not None: if self.clamp: pos_end = pos_end.clamp(self.pos_min, self.pos_max) pos_end = pos_end.float() n_time = self.n_time if n_time != 1: interpolation = torch.arange(0, n_time, dtype=torch.float, device=pos_start.device).view(1, n_time) / n_time position = pos_start + (pos_end - pos_start) * interpolation else: position = pos_start normalised_position = (position - self.pos_min) / (self.pos_max - self.pos_min) bins_ = (self.embed_dim * normalised_position).floor().long().detach() return self.emb(bins_) class JukeboxLabelConditioner(nn.Module): def __init__(self, config, include_time_signal): super().__init__() embed_dim = config.hidden_size timing_dims = config.timing_dims sampling_rate = config.sampling_rate (nb_genres, nb_artists) = config.metadata_dims music_tokens_shape = config.n_ctx self.max_nb_genres = config.max_nb_genres self.bow_genre_emb = nn.Embedding(nb_genres, embed_dim) self.artist_emb = nn.Embedding(nb_artists, embed_dim) self.include_time_signal = include_time_signal if self.include_time_signal: total_length_range = (config.min_duration * sampling_rate, config.max_duration * sampling_rate) absolute_pos_range = (0.0, config.max_duration * sampling_rate) relative_pos_range = (0.0, 1.0) self.total_length_emb = JukeboxRangeEmbedding(1, timing_dims, total_length_range, embed_dim) self.absolute_pos_emb = JukeboxRangeEmbedding(music_tokens_shape, timing_dims, absolute_pos_range, embed_dim) self.relative_pos_emb = JukeboxRangeEmbedding(music_tokens_shape, timing_dims, relative_pos_range, embed_dim, clamp=True) def forward(self, metadata): total_length = metadata[:, 0:1] offset = metadata[:, 1:2] length = metadata[:, 2:3] artist = metadata[:, 3:4] genre = metadata[:, 4:] artist_emb = self.artist_emb(artist) mask = (genre >= 0).float().unsqueeze(2) genre_emb = (self.bow_genre_emb(genre.clamp(0)) * mask).sum(dim=1, keepdim=True) start_emb = genre_emb + artist_emb if self.include_time_signal: (start, end) = (offset, offset + length) total_length = total_length.float() start = start.float() end = end.float() pos_emb = 
self.total_length_emb(total_length) + self.absolute_pos_emb(start, end) + self.relative_pos_emb(start / total_length, end / total_length) else: pos_emb = None return (start_emb, pos_emb) class JukeboxPrior(PreTrainedModel): config_class = JukeboxPriorConfig def _init_weights(self, module): init_scale = self.config.init_scale if isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=0.02 * init_scale) elif isinstance(module, JukeboxConv1D): if self.config.zero_out: module.weight.data.zero_() else: module.weight.data.normal_(mean=0.0, std=0.02 * init_scale) elif isinstance(module, JukeboxPositionalEmbedding): module.pos_emb.data.normal_(mean=0.0, std=0.01 * init_scale) elif isinstance(module, JukeboxRangeEmbedding): module.emb.weight.data.normal_(mean=0.0, std=0.01 * init_scale) elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, 'lm_head'): module.lm_head.weight.data.normal_(mean=0.0, std=0.02 * init_scale) elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, 'start_token'): module.start_token.data.normal_(mean=0.0, std=0.01 * init_scale) elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out: module.conv1d_2.weight.data.zero_() module.conv1d_2.bias.data.zero_() if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def __init__(self, config: JukeboxPriorConfig, level=None, nb_priors=3, vqvae_encoder=None, vqvae_decoder=None): super().__init__(config) self.vqvae_encoder = vqvae_encoder self.vqvae_decoder = vqvae_decoder self.levels = nb_priors self.level = level if level is not None else config.level self.base_model_prefix = f'priors.{self.level}' self.n_ctx = config.n_ctx self.lyric_conditioning = config.nb_relevant_lyric_tokens > 0 self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens self.encoder_loss_fraction = config.encoder_loss_fraction self.audio_conditioning = self.level != 0 self.cond_level = self.level - 1 if self.audio_conditioning: self.conditioner_blocks = JukeboxMusicTokenConditioner(config, self.level) self.metadata_conditioning = config.metadata_conditioning if self.metadata_conditioning: self.metadata_embedding = JukeboxLabelConditioner(config, include_time_signal=not self.audio_conditioning) self.is_encoder_decoder = config.is_encoder_decoder if config.is_encoder_decoder: self.input_shapes = [config.nb_relevant_lyric_tokens, config.n_ctx] self.embed_dim_shift = [0, config.lyric_vocab_size] self.width = config.hidden_size self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens self.prior = JukeboxConditionalAutoregressive(config, n_ctx=config.nb_relevant_lyric_tokens + config.n_ctx, embed_dim=config.lyric_vocab_size + config.music_vocab_size, audio_conditioning=self.audio_conditioning or self.metadata_conditioning, metadata_conditioning=True) else: encoder_config = config.encoder_config if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning: self.lyric_acts_width = encoder_config.hidden_size self.encoder_width = config.hidden_size self.encoder_dim = config.lyric_vocab_size self.encoder = JukeboxConditionalAutoregressive(encoder_config, n_ctx=self.nb_relevant_lyric_tokens, embed_dim=self.encoder_dim, audio_conditioning=False, metadata_conditioning=False, is_encoder=True) self.encoder.proj_in = JukeboxConv1D(encoder_config.hidden_size, config.hidden_size) self.encoder.final_layer_norm = JukeboxLayerNorm(config.hidden_size) 
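# The surrounding statements bolt three heads onto the lyric encoder: `proj_in` maps the encoder width to the
# prior width, `final_layer_norm` produces the hidden states consumed by the decoder's cross-attention (see
# `get_encoder_states`), and `lm_head` (next statement) is only used for the auxiliary lyric loss in
# `get_encoder_loss`. The sketch below illustrates, with made-up numbers, how `forward_tokens` later mixes the
# two losses; it is a paraphrase of that weighting, not a substitute for it.
def _mixed_loss_sketch(encoder_loss, next_token_loss, encoder_loss_fraction=0.4,
                       nb_relevant_lyric_tokens=384, n_ctx=8192):
    # Each term is weighted by the share of positions it covers; the lyric term is further scaled down.
    total_loss_dims = nb_relevant_lyric_tokens + n_ctx
    loss = encoder_loss_fraction * encoder_loss * nb_relevant_lyric_tokens / total_loss_dims
    loss += next_token_loss * n_ctx / total_loss_dims
    return loss

# e.g. _mixed_loss_sketch(2.0, 5.0) weights the lyric term by 0.4 * 384 / 8576 and the music term by 8192 / 8576.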
self.encoder.lm_head = nn.Linear(config.hidden_size, config.lyric_vocab_size, bias=False) else: self.nb_relevant_lyric_tokens = 0 self.prior = JukeboxConditionalAutoregressive(config, audio_conditioning=self.audio_conditioning or self.metadata_conditioning, metadata_conditioning=self.metadata_conditioning) self.next_token_prediction_loss_dims = config.n_ctx self.total_loss_dims = self.nb_relevant_lyric_tokens + self.next_token_prediction_loss_dims self.downsamples = [stride ** down for (stride, down) in zip(config.res_strides_t, config.res_downs_t)] self.cond_downsample = self.downsamples[self.level] if self.level != 0 else None self.raw_to_tokens = np.prod(self.downsamples[:nb_priors - self.level]) self.sample_length = self.n_ctx * self.raw_to_tokens logger.info(f'Level:{self.level}, Cond downsample:{self.cond_downsample}, Raw to tokens:{self.raw_to_tokens}, Sample length:{self.sample_length}') def get_metadata(self, labels, start, total_length, offset, get_indices=False): metadata = labels.clone() metadata[:, 0] = total_length metadata[:, 2] = int(self.sample_length) metadata[:, 1:2] = int(offset * self.raw_to_tokens) + int(start * self.raw_to_tokens) (metadata, indices) = self.set_metadata_lyric_tokens(metadata) if get_indices: return (metadata, indices) else: return metadata def set_metadata_lyric_tokens(self, labels): if self.nb_relevant_lyric_tokens > 0: tokens_list = torch.zeros((labels.shape[0], self.nb_relevant_lyric_tokens), dtype=torch.long, device=labels.device) indices_list = [] for idx in range(labels.shape[0]): full_tokens = labels.clone()[:, 4 + self.metadata_embedding.max_nb_genres:] (total_length, offset, duration) = (labels[idx, 0], labels[idx, 1], labels[idx, 2]) (tokens, indices) = get_relevant_lyric_tokens(full_tokens, self.nb_relevant_lyric_tokens, total_length, offset, duration) tokens_list[idx, :] = tokens indices_list.append(indices) return (torch.cat((labels[:, :4 + self.metadata_embedding.max_nb_genres], tokens_list), dim=-1), indices_list) else: return (labels, None) def get_music_tokens_conds(self, music_tokens, start, end): if self.level != 0: music_tokens_cond = music_tokens[self.level - 1] music_tokens = music_tokens_cond[:, start // self.cond_downsample:end // self.cond_downsample] missing_cond_len = self.n_ctx // self.cond_downsample - music_tokens_cond[-1].shape[-1] if missing_cond_len > 0: init_cond = torch.zeros(1, missing_cond_len).to(music_tokens_cond.device) music_tokens_cond = torch.cat((music_tokens_cond, init_cond), dim=-1).long() music_tokens_conds = [music_tokens_cond] else: music_tokens_conds = None return music_tokens_conds def prior_preprocess(self, tokens, conds): batch_size = tokens[0].shape[0] for i in range(len(tokens)): tokens[i] = (tokens[i] + int(self.embed_dim_shift[i])).view(batch_size, -1) for i in range(len(conds)): if conds[i] is None: conds[i] = torch.zeros((batch_size, self.input_shapes[i], self.width), dtype=tokens[0].dtype, device=tokens[0].device) return (torch.cat(tokens, dim=1), torch.cat(conds, dim=1)) def prior_postprocess(self, tokens): batch_size = tokens.shape[0] dims = (self.input_shapes[0], tokens.shape[1] - self.input_shapes[0]) tokens = list(torch.split(tokens, dims, dim=1)) for i in range(len(tokens)): bins_shift = int(self.embed_dim_shift[i]) tokens[i] = (tokens[i] - bins_shift).view(batch_size, -1) tokens[i] = torch.clamp(tokens[i], min=0) return tokens[-1] def embed_tokens(self, music_tokens_conds): music_tokens_conds = music_tokens_conds[:self.cond_level + 1] audio_conditioning = None for (music_tokens_cond, 
conditioner_block) in reversed(list(zip(music_tokens_conds, [self.conditioner_blocks]))): audio_conditioning = conditioner_block(music_tokens_cond, audio_conditioning) return audio_conditioning def encode(self, hidden_states, start_level=None, end_level=None, bs_chunks=1): if start_level is None: start_level = self.level if end_level is None: end_level = self.levels with torch.no_grad(): latent_states = self.vqvae_encoder(hidden_states, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks) return latent_states def decode(self, music_tokens, start_level=None, end_level=None, bs_chunks=1): if start_level is None: start_level = self.level if end_level is None: end_level = self.levels with torch.no_grad(): output = self.vqvae_decoder(music_tokens, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks) return output def get_cond(self, music_tokens_conds, metadata): if metadata is not None: n_labels = metadata.shape[1] - self.nb_relevant_lyric_tokens (metadata, lyric_tokens) = (metadata[:, :n_labels], metadata[:, n_labels:]) else: (metadata, lyric_tokens) = (None, None) (metadata_conditioning, metadata_pos) = self.metadata_embedding(metadata) if self.metadata_conditioning else (None, None) audio_conditioning = self.embed_tokens(music_tokens_conds) if self.audio_conditioning else metadata_pos return (audio_conditioning, metadata_conditioning, lyric_tokens) def sample(self, n_samples, music_tokens=None, music_tokens_conds=None, metadata=None, temp=1.0, top_k=0, top_p=0.0, chunk_size=None, sample_tokens=None): no_past_context = music_tokens is None or music_tokens.shape[1] == 0 name = {True: 'Ancestral', False: 'Primed'}[no_past_context] logger.info(f'{name} sampling {n_samples} samples with temp={temp}, top_k={top_k}, top_p={top_p}') with torch.no_grad(): (audio_conditioning, metadata_conditioning, lyric_tokens) = self.get_cond(music_tokens_conds, metadata) if self.is_encoder_decoder: if no_past_context: (lyric_and_music_tokens, audio_conditioning) = self.prior_preprocess([lyric_tokens], [None, audio_conditioning]) else: (lyric_and_music_tokens, audio_conditioning) = self.prior_preprocess([lyric_tokens, music_tokens], [None, audio_conditioning]) if sample_tokens is not None: sample_tokens += self.nb_relevant_lyric_tokens music_tokens = self.prior.primed_sample(n_samples, lyric_and_music_tokens, audio_conditioning, metadata_conditioning, temp=temp, top_k=top_k, top_p=top_p, chunk_size=chunk_size, sample_tokens=sample_tokens) music_tokens = self.prior_postprocess(music_tokens) else: last_encoder_hidden_states = self.get_encoder_states(lyric_tokens, sample=True) if no_past_context: music_tokens = self.prior.sample(n_samples, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, temp=temp, top_k=top_k, top_p=top_p, sample_tokens=sample_tokens) else: music_tokens = self.prior.primed_sample(n_samples, music_tokens, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, temp=temp, top_k=top_k, top_p=top_p, chunk_size=chunk_size, sample_tokens=sample_tokens) return music_tokens def get_encoder_states(self, lyric_tokens, sample=False): if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning: if sample: self.encoder = self.encoder.to(lyric_tokens.device) lyric_acts = self.encoder(lyric_tokens, None, None, None) lyric_acts = self.encoder.proj_in(lyric_acts) last_encoder_hidden_states = self.encoder.final_layer_norm(lyric_acts) else: last_encoder_hidden_states = None return last_encoder_hidden_states def get_encoder_loss(self, 
last_encoder_hidden_states, target_lyrics): if self.lyric_conditioning: last_encoder_hidden_states = self.encoder.lm_head(last_encoder_hidden_states) encoder_loss = nn.functional.cross_entropy(last_encoder_hidden_states.view(-1, self.encoder_dim), target_lyrics.view(-1)) / np.log(2.0) else: encoder_loss = torch.tensor(0.0, device=last_encoder_hidden_states.device) return encoder_loss def forward_tokens(self, music_tokens, music_tokens_conds=[], metadata=None, get_preds=False, get_attn_weights=False): if get_attn_weights: self.prior.transformer.set_record_attn(get_attn_weights) (audio_conditioning, metadata_conditioning, lyric_tokens) = self.get_cond(music_tokens_conds, metadata) if self.is_encoder_decoder: (tokens, audio_conditioning) = self.prior_preprocess([lyric_tokens, music_tokens], [None, audio_conditioning]) ((encoder_loss, next_token_prediction_loss), preds) = self.prior(tokens, audio_conditioning, metadata_conditioning, get_sep_loss=True, get_preds=get_preds) else: last_encoder_hidden_states = self.get_encoder_states(lyric_tokens) encoder_loss = self.get_encoder_loss(last_encoder_hidden_states, lyric_tokens) (next_token_prediction_loss, preds) = self.prior(music_tokens, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, get_preds=get_preds) loss = self.encoder_loss_fraction * encoder_loss * self.nb_relevant_lyric_tokens / self.total_loss_dims loss += next_token_prediction_loss * self.next_token_prediction_loss_dims / self.total_loss_dims metrics = {'bpd': next_token_prediction_loss.clone().detach(), 'encoder_loss': encoder_loss.clone().detach(), 'next_token_prediction_loss': next_token_prediction_loss.clone().detach()} if get_preds: metrics['preds'] = preds.clone().detach() if get_attn_weights: saved_attn_weights = self.prior.transformer.saved_attn_weights self.prior.transformer.set_record_attn(False) return saved_attn_weights else: return (loss, metrics) def forward(self, hidden_states: torch.Tensor, metadata: Optional[List[torch.LongTensor]], decode: Optional[bool]=False, get_preds: Optional[bool]=False) -> List[torch.Tensor]: batch_size = hidden_states.shape[0] (music_tokens, *music_tokens_conds) = self.encode(hidden_states, bs_chunks=batch_size) (loss, metrics) = self.forward_tokens(music_tokens=music_tokens, music_tokens_conds=music_tokens_conds, metadata=metadata, get_preds=get_preds) if decode: dequantised_states = self.decode([music_tokens, *music_tokens_conds]) else: dequantised_states = None return (dequantised_states, loss, metrics) class JukeboxPreTrainedModel(PreTrainedModel): config_class = JukeboxConfig base_model_prefix = 'jukebox' supports_gradient_checkpointing = False def _init_weights(self, module): if isinstance(module, JukeboxPrior) or isinstance(module, JukeboxVQVAE): module.apply(module._init_weights) def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) JUKEBOX_SAMPLING_INPUT_DOCSTRING = '\n labels (`List[torch.LongTensor]` of length `n_sample`, and shape `(self.levels, self.config.max_nb_genre + lyric_sequence_length)` :\n List of metadata such as `artist_id`, `genre_id` and the full list of lyric tokens which are used to\n condition the generation.\n sampling_kwargs (`Dict[Any]`):\n Various additional sampling arguments that are used by the `_sample` function. A detail list of the\n arguments can bee seen in the [`_sample`] function documentation.\n' @add_start_docstrings('The bare JUKEBOX Model used for music generation. 
4 sampling techniques are supported : `primed_sample`, `upsample`,\n `continue_sample` and `ancestral_sample`. It does not have a `forward` method as the training is not end to end. If\n you want to fine-tune the model, it is recommended to use the `JukeboxPrior` class and train each prior\n individually.\n ', JUKEBOX_START_DOCSTRING) class JukeboxModel(JukeboxPreTrainedModel): _no_split_modules = ['JukeboxBlock'] def __init__(self, config): super().__init__(config) vqvae_config = config.vqvae_config self.vqvae = JukeboxVQVAE(vqvae_config) self.set_shared_params(config) self.priors = nn.ModuleList([JukeboxPrior(config.prior_configs[level], level) for level in range(config.nb_priors)]) def set_shared_params(self, model_config): for config in model_config.prior_configs: config.sampling_rate = model_config.sampling_rate config.timing_dims = model_config.timing_dims config.min_duration = model_config.min_duration config.max_duration = model_config.max_duration config.max_nb_genres = model_config.max_nb_genres config.metadata_conditioning = model_config.metadata_conditioning def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1): return self.vqvae.decode(music_tokens, start_level, end_level, bs_chunks) def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1): return self.vqvae.encode(input_audio, start_level, end_level, bs_chunks) def split_batch(self, obj, n_samples, split_size): n_passes = (n_samples + split_size - 1) // split_size if isinstance(obj, torch.Tensor): return torch.split(obj, split_size, dim=0) elif isinstance(obj, list): return list(zip(*[torch.split(item, split_size, dim=0) for item in obj])) elif obj is None: return [None] * n_passes else: raise TypeError('Unknown input type') def sample_partial_window(self, music_tokens, labels, offset, sampling_kwargs, level, tokens_to_sample, max_batch_size): prior = self.priors[level] sampled_tokens = music_tokens[level] n_ctx = prior.n_ctx nb_sampled_tokens = sampled_tokens.shape[1] if nb_sampled_tokens < n_ctx - tokens_to_sample: sampling_kwargs['sample_tokens'] = nb_sampled_tokens + tokens_to_sample start = 0 else: sampling_kwargs['sample_tokens'] = n_ctx start = nb_sampled_tokens - n_ctx + tokens_to_sample return self.sample_single_window(music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size) def sample_single_window(self, music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size): prior = self.priors[level] n_samples = music_tokens[0].shape[0] n_ctx = prior.n_ctx end = start + n_ctx previous_sampled_tokens = music_tokens[level][:, start:end] sample_tokens = sampling_kwargs.get('sample_tokens', None) if 'sample_tokens' in sampling_kwargs: sample_tokens = end - start conditioning_tokens = previous_sampled_tokens.shape[1] new_tokens = sample_tokens - previous_sampled_tokens.shape[1] logger.info(f'Sampling {sample_tokens} tokens for [{start},{start + sample_tokens}]. 
Conditioning on {conditioning_tokens} tokens') if new_tokens <= 0: return music_tokens music_tokens_conds = prior.get_music_tokens_conds(music_tokens, start, end) metadata = prior.get_metadata(labels, start, self.total_length, offset) music_tokens_list = self.split_batch(previous_sampled_tokens, n_samples, max_batch_size) music_tokens_conds_list = self.split_batch(music_tokens_conds, n_samples, max_batch_size) metadata_list = self.split_batch(metadata, n_samples, max_batch_size) tokens = [] iterator = tqdm(zip(music_tokens_list, music_tokens_conds_list, metadata_list), leave=False) for (music_tokens_i, music_tokens_conds_i, metadata_i) in iterator: name = ['Ancestral', 'Primed'][music_tokens_i.shape[1] == 0] iterator.set_description(f'[prior level {level}] {name} Sampling {sample_tokens} tokens out of {self.total_length // prior.raw_to_tokens}', refresh=True) tokens_i = prior.sample(n_samples=music_tokens_i.shape[0], music_tokens=music_tokens_i, music_tokens_conds=music_tokens_conds_i, metadata=metadata_i, **sampling_kwargs) tokens.append(tokens_i) sampled_tokens = torch.cat(tokens, dim=0) music_tokens_new = sampled_tokens[:, -new_tokens:] music_tokens[level] = torch.cat([music_tokens[level], music_tokens_new], dim=1) return music_tokens def sample_level(self, music_tokens, labels, offset, sampling_kwargs, level, total_length, hop_length, max_batch_size): if total_length >= self.priors[level].n_ctx: iterator = get_starts(total_length, self.priors[level].n_ctx, hop_length) for start in iterator: music_tokens = self.sample_single_window(music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size) else: music_tokens = self.sample_partial_window(music_tokens, labels, offset, sampling_kwargs, level, total_length, max_batch_size) return music_tokens @torch.no_grad() def _sample(self, music_tokens, labels, sample_levels, metas=None, chunk_size=32, sampling_temperature=0.98, lower_batch_size=16, max_batch_size=16, sample_length_in_seconds=24, compute_alignments=False, sample_tokens=None, offset=0, save_results=True, sample_length=None) -> List[torch.LongTensor]: top_prior = self.priors[0] if sample_length is not None: total_length = sample_length else: total_length = int(sample_length_in_seconds * self.config.sampling_rate) // top_prior.raw_to_tokens * top_prior.raw_to_tokens if sample_levels is None: sample_levels = range(len(self.priors)) self.total_length = total_length for level in sample_levels: sampling_kwargs = {'temp': 0.99 if level == len(self.priors) - 1 else sampling_temperature, 'chunk_size': chunk_size, 'sample_tokens': sample_tokens} total_token_to_sample = total_length // self.priors[level].raw_to_tokens hop_length = int(self.config.hop_fraction[level] * self.priors[level].n_ctx) max_batch_size = lower_batch_size if level != sample_levels else max_batch_size music_tokens = self.sample_level(music_tokens, labels[level], offset, sampling_kwargs, level, total_token_to_sample, hop_length, max_batch_size) if save_results: self.vqvae.to(music_tokens[level].device) with torch.no_grad(): start_level = len(self.priors) - level - 1 raw_audio = self.vqvae.decode(music_tokens[:level + 1], start_level=start_level, bs_chunks=music_tokens[level].shape[0]) logdir = f'jukebox/level_{level}' if not os.path.exists(logdir): os.makedirs(logdir) save_temp_audio(logdir, level, metas=metas, aud=raw_audio.float()) if compute_alignments and self.priors[0] is not None and (self.priors[0].nb_relevant_lyric_tokens > 0): with torch.no_grad(): alignments = get_alignment(music_tokens, labels[0], 
self.priors[0], self.config) torch.save({'alignments': alignments}, f'{logdir}/lyric_alignments.pt') return music_tokens @add_start_docstrings('\n Generates music tokens based on the provided `labels. Will start at the desired prior level and automatically\n upsample the sequence. If you want to create the audio, you should call `model.decode(tokens)`, which will use\n the VQ-VAE decoder to convert the music tokens to raw audio.\n\n Args:\n labels (`List[torch.LongTensor]`) :\n List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre +\n lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens\n which are used to condition the generation.\n n_samples (`int`, *optional*, default to 1) :\n Number of samples to be generated in parallel.\n ') def ancestral_sample(self, labels, n_samples=1, **sampling_kwargs) -> List[torch.LongTensor]: sample_levels = sampling_kwargs.pop('sample_levels', list(range(len(self.priors)))) music_tokens = [torch.zeros(n_samples, 0, dtype=torch.long, device=labels[0].device) for _ in range(len(self.priors))] music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens @add_start_docstrings('Generates a continuation of the previously generated tokens.\n\n Args:\n music_tokens (`List[torch.LongTensor]` of length `self.levels` ) :\n A sequence of music tokens which will be used as context to continue the sampling process. Should have\n `self.levels` tensors, each corresponding to the generation at a certain level.\n ', JUKEBOX_SAMPLING_INPUT_DOCSTRING) def continue_sample(self, music_tokens, labels, **sampling_kwargs) -> List[torch.LongTensor]: sample_levels = sampling_kwargs.pop('sample_levels', list(range(len(self.priors)))) music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens @add_start_docstrings('Upsamples a sequence of music tokens using the prior at level `level`.\n\n Args:\n music_tokens (`List[torch.LongTensor]` of length `self.levels` ) :\n A sequence of music tokens which will be used as context to continue the sampling process. Should have\n `self.levels` tensors, each corresponding to the generation at a certain level.\n ', JUKEBOX_SAMPLING_INPUT_DOCSTRING) def upsample(self, music_tokens, labels, **sampling_kwargs) -> List[torch.LongTensor]: sample_levels = sampling_kwargs.pop('sample_levels', list(range(len(self.priors) - 1))) music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens @add_start_docstrings('Generate a raw audio conditioned on the provided `raw_audio` which is used as conditioning at each of the\n generation levels. The audio is encoded to music tokens using the 3 levels of the VQ-VAE. 
These tokens are\n used: as conditioning for each level, which means that no ancestral sampling is required.\n\n Args:\n raw_audio (`List[torch.Tensor]` of length `n_samples` ) :\n A list of raw audio that will be used as conditioning information for each samples that will be\n generated.\n ', JUKEBOX_SAMPLING_INPUT_DOCSTRING) def primed_sample(self, raw_audio, labels, **sampling_kwargs) -> List[torch.LongTensor]: sample_levels = sampling_kwargs.pop('sample_levels', list(range(len(self.priors)))) self.vqvae.to(raw_audio.device).float() with torch.no_grad(): music_tokens = self.vqvae.encode(raw_audio, start_level=0, end_level=len(self.priors), bs_chunks=raw_audio.shape[0]) music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens # File: transformers-main/src/transformers/models/deprecated/jukebox/tokenization_jukebox.py """""" import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ....tokenization_utils import AddedToken, PreTrainedTokenizer from ....tokenization_utils_base import BatchEncoding from ....utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ....utils.generic import _is_jax, _is_numpy logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'artists_file': 'artists.json', 'lyrics_file': 'lyrics.json', 'genres_file': 'genres.json'} class JukeboxTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, artists_file, genres_file, lyrics_file, version=['v3', 'v2', 'v2'], max_n_lyric_tokens=512, n_genres=5, unk_token='<|endoftext|>', **kwargs): unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token self.version = version self.max_n_lyric_tokens = max_n_lyric_tokens self.n_genres = n_genres self._added_tokens_decoder = {0: unk_token} with open(artists_file, encoding='utf-8') as vocab_handle: self.artists_encoder = json.load(vocab_handle) with open(genres_file, encoding='utf-8') as vocab_handle: self.genres_encoder = json.load(vocab_handle) with open(lyrics_file, encoding='utf-8') as vocab_handle: self.lyrics_encoder = json.load(vocab_handle) oov = '[^A-Za-z0-9.,:;!?\\-\'\\"()\\[\\] \\t\\n]+' if len(self.lyrics_encoder) == 79: oov = oov.replace("\\-'", "\\-+'") self.out_of_vocab = regex.compile(oov) self.artists_decoder = {v: k for (k, v) in self.artists_encoder.items()} self.genres_decoder = {v: k for (k, v) in self.genres_encoder.items()} self.lyrics_decoder = {v: k for (k, v) in self.lyrics_encoder.items()} super().__init__(unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs) @property def vocab_size(self): return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder) def get_vocab(self): return {'artists_encoder': self.artists_encoder, 'genres_encoder': self.genres_encoder, 'lyrics_encoder': self.lyrics_encoder} def _convert_token_to_id(self, list_artists, list_genres, list_lyrics): artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists] for genres in range(len(list_genres)): list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]] list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres])) lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in 
list_lyrics[0]], [], []] return (artists_id, list_genres, lyric_ids) def _tokenize(self, lyrics): return list(lyrics) def tokenize(self, artist, genre, lyrics, **kwargs): (artist, genre, lyrics) = self.prepare_for_tokenization(artist, genre, lyrics) lyrics = self._tokenize(lyrics) return (artist, genre, lyrics) def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool=False) -> Tuple[str, str, str, Dict[str, Any]]: for idx in range(len(self.version)): if self.version[idx] == 'v3': artists[idx] = artists[idx].lower() genres[idx] = [genres[idx].lower()] else: artists[idx] = self._normalize(artists[idx]) + '.v2' genres[idx] = [self._normalize(genre) + '.v2' for genre in genres[idx].split('_')] if self.version[0] == 'v2': self.out_of_vocab = regex.compile('[^A-Za-z0-9.,:;!?\\-\'\\"()\\[\\] \\t\\n]+') vocab = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'"()[] \t\n' self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))} self.vocab[''] = 0 self.n_vocab = len(vocab) + 1 self.lyrics_encoder = self.vocab self.lyrics_decoder = {v: k for (k, v) in self.vocab.items()} self.lyrics_decoder[0] = '' else: self.out_of_vocab = regex.compile('[^A-Za-z0-9.,:;!?\\-+\'\\"()\\[\\] \\t\\n]+') lyrics = self._run_strip_accents(lyrics) lyrics = lyrics.replace('\\', '\n') lyrics = (self.out_of_vocab.sub('', lyrics), [], []) return (artists, genres, lyrics) def _run_strip_accents(self, text): text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _normalize(self, text: str) -> str: accepted = [chr(i) for i in range(ord('a'), ord('z') + 1)] + [chr(i) for i in range(ord('A'), ord('Z') + 1)] + [chr(i) for i in range(ord('0'), ord('9') + 1)] + ['.'] accepted = frozenset(accepted) pattern = re.compile('_+') text = ''.join([c if c in accepted else '_' for c in text.lower()]) text = pattern.sub('_', text).strip('_') return text def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str: return ' '.join(lyrics) def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]]=None, prepend_batch_axis: bool=False): if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError('Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.') import tensorflow as tf as_tensor = tf.constant is_tensor = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.') import torch as_tensor = torch.tensor is_tensor = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.') import jax.numpy as jnp as_tensor = jnp.array is_tensor = _is_jax else: as_tensor = np.asarray is_tensor = _is_numpy try: if prepend_batch_axis: inputs = [inputs] if not is_tensor(inputs): inputs = as_tensor(inputs) except: raise ValueError("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' 'truncation=True' to have batched tensors with the same length.") return inputs def __call__(self, artist, genres, lyrics='', return_tensors='pt') -> BatchEncoding: input_ids = [0, 0, 0] artist = [artist] * len(self.version) genres = [genres] * 
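# NOTE: `_normalize` above lowercases its input, maps every character outside
# [a-z0-9.] to "_", collapses runs of "_" and strips them, e.g. "The Beatles" ->
# "the_beatles". `prepare_for_tokenization` applies it (with a ".v2" suffix) for v2
# priors and plain lowercasing for v3, while lyrics are reduced to the small character
# vocabulary through the `out_of_vocab` regex. `__call__`, being assembled here, builds
# one sequence per prior level: three leading placeholder ids (zeros), the artist id,
# `n_genres` genre ids padded with -1, and the lyric character ids (lyrics are only
# attached to the first level).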
len(self.version) (artists_tokens, genres_tokens, lyrics_tokens) = self.tokenize(artist, genres, lyrics) (artists_id, genres_ids, full_tokens) = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens) attention_masks = [-INFINITY] * len(full_tokens[-1]) input_ids = [self.convert_to_tensors([input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors) for i in range(len(self.version))] return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks}) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return artists_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file']) with open(artists_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.artists_encoder, ensure_ascii=False)) genres_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file']) with open(genres_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.genres_encoder, ensure_ascii=False)) lyrics_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file']) with open(lyrics_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False)) return (artists_file, genres_file, lyrics_file) def _convert_id_to_token(self, artists_index, genres_index, lyric_index): artist = self.artists_decoder.get(artists_index) genres = [self.genres_decoder.get(genre) for genre in genres_index] lyrics = [self.lyrics_decoder.get(character) for character in lyric_index] return (artist, genres, lyrics) # File: transformers-main/src/transformers/models/deprecated/mctct/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_mctct': ['MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_mctct'] = ['MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel'] if TYPE_CHECKING: from .configuration_mctct import MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/mctct/configuration_mctct.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class MCTCTConfig(PretrainedConfig): model_type = 'mctct' def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-05, layerdrop=0.3, hidden_act='relu', initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, 
num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction='sum', ctc_zero_infinity=False, **kwargs): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.layerdrop = layerdrop self.hidden_act = hidden_act self.initializer_range = initializer_range self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.conv_glu_dim = conv_glu_dim self.conv_dropout = conv_dropout self.num_conv_layers = num_conv_layers self.input_feat_per_channel = input_feat_per_channel self.input_channels = input_channels self.conv_channels = conv_channels self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity self.conv_kernel = list(conv_kernel) self.conv_stride = list(conv_stride) if len(self.conv_kernel) != self.num_conv_layers: raise ValueError(f'Configuration for convolutional module is incorrect. It is required that `len(config.conv_kernel)` == `config.num_conv_layers` but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, `config.num_conv_layers = {self.num_conv_layers}`.') # File: transformers-main/src/transformers/models/deprecated/mctct/feature_extraction_mctct.py """""" from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging logger = logging.get_logger(__name__) class MCTCTFeatureExtractor(SequenceFeatureExtractor): model_input_names = ['input_features', 'attention_mask'] def __init__(self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function='hamming_window', frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.feature_size = feature_size self.sampling_rate = sampling_rate self.padding_value = padding_value self.hop_length = hop_length self.win_length = win_length self.frame_signal_scale = frame_signal_scale self.preemphasis_coeff = preemphasis_coeff self.mel_floor = mel_floor self.normalize_means = normalize_means self.normalize_vars = normalize_vars self.win_function = win_function self.return_attention_mask = return_attention_mask self.sample_size = win_length * sampling_rate // 1000 self.sample_stride = hop_length * sampling_rate // 1000 self.n_fft = optimal_fft_length(self.sample_size) self.n_freqs = self.n_fft // 2 + 1 def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray: if self.win_function == 'hamming_window': window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False) else: window = window_function(window_length=self.sample_size, 
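# NOTE: with the default 16 kHz sampling rate, the window settings above work out to
# sample_size = 25 * 16000 // 1000 = 400 samples, sample_stride = 10 * 16000 // 1000 =
# 160 samples, n_fft = optimal_fft_length(400) = 512 and n_freqs = 512 // 2 + 1 = 257
# frequency bins, which `_extract_mfsc_features` then maps onto `feature_size` = 80
# mel filter banks.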
name=self.win_function) fbanks = mel_filter_bank(num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate) msfc_features = spectrogram(one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel='log') return msfc_features.T def _normalize_one(self, x, input_length, padding_value): if self.normalize_means: mean = x[:input_length].mean(axis=0) x = np.subtract(x, mean) if self.normalize_vars: std = x[:input_length].std(axis=0) x = np.divide(x, std) if input_length < x.shape[0]: x[input_length:] = padding_value x = x.astype(np.float32) return x def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray]=None) -> List[np.ndarray]: lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(x, n, self.padding_value) for (x, n) in zip(input_features, lengths)] def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy]=False, max_length: Optional[int]=None, truncation: bool=False, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None, **kwargs) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning('It is strongly recommended to pass the ``sampling_rate`` argument to this function. 
Failing to do so can result in silent errors that might be hard to debug.') is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}') is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list))) if is_batched: raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech] elif not is_batched and (not isinstance(raw_speech, np.ndarray)): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) if not is_batched: raw_speech = [raw_speech] features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech] encoded_inputs = BatchFeature({'input_features': features}) padded_inputs = self.pad(encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs) input_features = padded_inputs.get('input_features') if isinstance(input_features[0], list): padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features] attention_mask = padded_inputs.get('attention_mask') if attention_mask is not None: padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask] if self.normalize_means or self.normalize_vars: attention_mask = np.array(attention_mask, dtype=np.int32) if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD and padding else None padded_inputs['input_features'] = self.normalize(padded_inputs['input_features'], attention_mask=attention_mask) if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs # File: transformers-main/src/transformers/models/deprecated/mctct/modeling_mctct.py """""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ....activations import ACT2FN from ....file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ....integrations.deepspeed import is_deepspeed_zero3_enabled from ....modeling_attn_mask_utils import _prepare_4d_attention_mask from ....modeling_outputs import BaseModelOutput, CausalLMOutput from ....modeling_utils import PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ....utils import logging from .configuration_mctct import MCTCTConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 1 _CONFIG_FOR_DOC = 'MCTCTConfig' _CHECKPOINT_FOR_DOC = 'speechbrain/m-ctc-t-large' _EXPECTED_OUTPUT_SHAPE = [1, 195, 1536] _CTC_EXPECTED_OUTPUT = '"Mr. 
Quilter is the apostle of the middle classes, and we\'re glad to welcome his gospel."' _CTC_EXPECTED_LOSS = 1885.65 class MCTCTConv1dSubsampler(nn.Module): def __init__(self, config): super().__init__() self.config = config self.glu_dim = config.conv_glu_dim self.dropout = nn.Dropout(config.conv_dropout) self.num_layers = config.num_conv_layers self.in_channels = config.input_feat_per_channel * config.input_channels if self.num_layers > 1: if config.conv_channels is None: raise ValueError('Need to specify `conv_channels` configuration in `MCTCTConfig` to use multiple convolution layers.') self.mid_channels = config.conv_channels else: self.mid_channels = None self.out_channels = config.hidden_size * 2 self.kernel_size = config.conv_kernel self.stride = config.conv_stride self.conv_layers = nn.ModuleList((nn.Conv1d(self.in_channels if i == 0 else self.mid_channels[i], self.mid_channels[i] if i < self.num_layers - 1 else self.out_channels, kernel_size=k, stride=self.stride[i], padding='valid') for (i, k) in enumerate(self.kernel_size))) def forward(self, input_features): padding = sum([size // 2 for size in self.kernel_size]) input_features = torch.nn.functional.pad(input_features, (0, 0, padding, padding), 'constant', 0) hidden_states = input_features.transpose(1, 2).contiguous() for conv in self.conv_layers: hidden_states = conv(hidden_states) hidden_states = nn.functional.glu(hidden_states, dim=self.glu_dim) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.transpose(1, 2).contiguous() return hidden_states class MCTCTEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = MCTCTLayerNorm() self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False) def forward(self, input_features=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): input_shape = input_features.size() if input_features is not None else inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_features) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class MCTCTSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is 
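# NOTE: `MCTCTConv1dSubsampler` above pads the time axis symmetrically by
# sum(kernel_size // 2) before its "valid" convolutions and projects to
# `hidden_size * 2` channels, so the GLU activation (which halves its input along
# `conv_glu_dim`) brings the output back to `hidden_size` while subsampling time by
# the stride.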
not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.attention_head_dim self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def reshape_fortran(self, x, shape): if len(x.shape) > 0: x = x.permute(*reversed(range(len(x.shape)))) return x.reshape(*reversed(shape)).permute(*reversed(range(len(shape)))) def relative_position_embedding_rotate(self, scores): scores = scores.permute(0, 2, 3, 1) (batch, hidden_state, seq_len, heads) = scores.shape scores = torch.cat((scores, torch.zeros((batch, seq_len, seq_len, heads), device=scores.device)), dim=1) scores = self.reshape_fortran(scores, [batch, (hidden_state + seq_len) * seq_len, 1, heads]) scores = scores[:, :(seq_len + hidden_state - 1) * seq_len] scores = self.reshape_fortran(scores, [batch, hidden_state + seq_len - 1, seq_len, heads]) halfpoint = hidden_state // 2 scores = scores[:, halfpoint:halfpoint + seq_len].transpose(1, 2) return scores.permute(0, 3, 1, 2) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) mixed_query_layer = mixed_query_layer / math.sqrt(self.attention_head_size) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) positional_embedding = self.distance_embedding.weight relative_position_scores = torch.einsum('lh, bche -> bcle', positional_embedding, query_layer.transpose(2, 3)) relative_position_scores = self.relative_position_embedding_rotate(relative_position_scores) attention_scores = attention_scores + relative_position_scores if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).flatten(start_dim=-2) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class MCTCTLayerNorm(nn.Module): def __init__(self): super().__init__() self.singleton_weight = nn.Parameter(torch.ones(1)) self.singleton_bias = nn.Parameter(torch.zeros(1)) def forward(self, hidden_states): return hidden_states * self.singleton_weight + self.singleton_bias class MCTCTSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.config = config self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) 
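# A minimal standalone sketch of the head-splitting reshape performed by
# `MCTCTSelfAttention.transpose_for_scores` above, assuming the default configuration
# (4 attention heads, attention_head_dim = 384); batch and sequence sizes are
# illustrative only.
import torch

batch, seq_len, num_heads, head_dim = 2, 50, 4, 384
hidden = torch.randn(batch, seq_len, num_heads * head_dim)          # (batch, seq, all_head_size)
per_head = hidden.view(batch, seq_len, num_heads, head_dim).permute(0, 2, 1, 3)
assert per_head.shape == (batch, num_heads, seq_len, head_dim)
# The query is additionally scaled by 1 / sqrt(attention_head_dim) before the matmul
# with the keys, and a relative-position term built from `distance_embedding` is added
# to the attention scores via `relative_position_embedding_rotate`.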
self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MCTCTAttention(nn.Module): def __init__(self, config): super().__init__() self.self = MCTCTSelfAttention(config) self.output = MCTCTSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class MCTCTIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size, bias=False) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class MCTCTOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size, bias=False) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class MCTCTLayer(nn.Module): def __init__(self, config: MCTCTConfig): super().__init__() self.seq_len_dim = 1 self.chunk_size_feed_forward = config.chunk_size_feed_forward self.intermediate = MCTCTIntermediate(config) self.attention = MCTCTAttention(config) self.is_decoder = config.is_decoder self.output = MCTCTOutput(config) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class MCTCTPreTrainedModel(PreTrainedModel): config_class = MCTCTConfig base_model_prefix = 'mctct' main_input_name = 'input_features' supports_gradient_checkpointing 
= True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, MCTCTLayerNorm): module.singleton_weight.data.fill_(1.0) module.singleton_bias.data.zero_() if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): dilation = 1 for (_, kernel_sz, stride) in zip(range(self.config.num_conv_layers), self.config.conv_kernel, self.config.conv_stride): padding = kernel_sz // 2 input_lengths = input_lengths + 2 * padding - dilation * (kernel_sz - 1) - 1 input_lengths = torch.div(input_lengths, stride, rounding_mode='trunc') + 1 return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask): if len(attention_mask.shape) > 2: attention_mask = attention_mask[:, :, -1] subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)) bsz = attention_mask.size()[0] attention_mask = torch.zeros((bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device) attention_mask[torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long() return attention_mask MCTCT_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MCTCTConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' MCTCT_INPUTS_DOCSTRING = '\n Args:\n input_features (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`Wav2Vec2CTCTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
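# NOTE: worked example for `_get_feat_extract_output_lengths` above with the default
# single conv layer (kernel 7, stride 3): padding = 7 // 2 = 3, so
# out = (L + 2 * 3 - (7 - 1) - 1) // 3 + 1 = (L - 1) // 3 + 1; e.g. 1000 input frames
# are subsampled to 334 encoder positions. `_get_feature_vector_attention_mask` then
# rebuilds a 0/1 mask of that subsampled length using the flip/cumsum/flip trick.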
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' class MCTCTEncoder(MCTCTPreTrainedModel): def __init__(self, config: MCTCTConfig): super().__init__(config) self.hidden_dropout_prob = config.hidden_dropout_prob self.layer_norm = MCTCTLayerNorm() self.conv = MCTCTConv1dSubsampler(config) self.layers = nn.ModuleList([MCTCTLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, input_features: torch.Tensor, attention_mask: torch.Tensor, head_mask: torch.Tensor, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_features = self.layer_norm(input_features) inputs_embeds = self.conv(input_features) if attention_mask is not None: attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask) hidden_states = nn.functional.dropout(inputs_embeds, p=self.hidden_dropout_prob, training=self.training) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and dropout_probability < self.config.layerdrop else False if not skip_the_layer or deepspeed_zero3_is_enabled: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, output_attentions) else: layer_outputs = encoder_layer(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) @add_start_docstrings('The bare M-CTC-T Model transformer outputting raw hidden-states without any specific head on top.', MCTCT_START_DOCSTRING) class MCTCTModel(MCTCTPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.encoder = MCTCTEncoder(config) self.post_init() @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, 
modality='audio', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, input_features: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_features is None: raise ValueError('You have to specify input_features.') encoder_outputs = self.encoder(input_features, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('MCTCT Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).', MCTCT_START_DOCSTRING) class MCTCTForCTC(MCTCTPreTrainedModel): def __init__(self, config): super().__init__(config) self.mctct = MCTCTModel(config) if config.vocab_size is None: raise ValueError(f"You are trying to instantiate {self.__class__} with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `MCTCTForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.") output_hidden_size = config.hidden_size self.ctc_head = nn.Linear(output_hidden_size, config.vocab_size) self.post_init() @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS) def forward(self, input_features: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[Tuple, CausalLMOutput]: if labels is not None and labels.max() >= self.config.vocab_size: raise ValueError(f'Label values must be <= vocab_size: {self.config.vocab_size}') return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mctct(input_features, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] logits = self.ctc_head(hidden_states) loss = None if labels is not None: attention_mask = attention_mask if attention_mask is not None else torch.ones(input_features.shape[:-1], dtype=torch.long) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss(log_probs, 
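# NOTE: shape conventions for the CTC loss assembled here: `log_probs` has been
# transposed to (time, batch, vocab_size) as `nn.functional.ctc_loss` expects,
# `input_lengths` come from the conv-subsampled attention mask, the targets are the
# label ids selected with `labels >= 0` (padding positions are expected to carry
# negative values), and the blank index is `config.pad_token_id`.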
flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return (loss,) + output if loss is not None else output return CausalLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/deprecated/mctct/processing_mctct.py """""" import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class MCTCTProcessor(ProcessorMixin): feature_extractor_class = 'MCTCTFeatureExtractor' tokenizer_class = 'AutoTokenizer' def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def __call__(self, *args, **kwargs): if self._in_target_context_manager: return self.current_processor(*args, **kwargs) if 'raw_speech' in kwargs: warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.') audio = kwargs.pop('raw_speech') else: audio = kwargs.pop('audio', None) sampling_rate = kwargs.pop('sampling_rate', None) text = kwargs.pop('text', None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.') if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs['labels'] = encodings['input_ids'] return inputs def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def pad(self, *args, **kwargs): if self._in_target_context_manager: return self.current_processor.pad(*args, **kwargs) input_features = kwargs.pop('input_features', None) labels = kwargs.pop('labels', None) if len(args) > 0: input_features = args[0] args = args[1:] if input_features is not None: input_features = self.feature_extractor.pad(input_features, *args, **kwargs) if labels is not None: labels = self.tokenizer.pad(labels, **kwargs) if labels is None: return input_features elif input_features is None: return labels else: input_features['labels'] = labels['input_ids'] return input_features def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @contextmanager def as_target_processor(self): warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.') self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.feature_extractor self._in_target_context_manager = False # File: transformers-main/src/transformers/models/deprecated/mega/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_mega': ['MegaConfig', 'MegaOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_mega'] = ['MegaForCausalLM', 'MegaForMaskedLM', 'MegaForMultipleChoice', 'MegaForQuestionAnswering', 'MegaForSequenceClassification', 'MegaForTokenClassification', 'MegaModel', 'MegaPreTrainedModel'] if TYPE_CHECKING: from .configuration_mega import MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/mega/configuration_mega.py """""" from collections import OrderedDict from typing import Mapping from ....configuration_utils import PretrainedConfig from ....onnx import OnnxConfig from ....utils import logging logger = logging.get_logger(__name__) class MegaConfig(PretrainedConfig): model_type = 'mega' def __init__(self, vocab_size=30522, hidden_size=128, num_hidden_layers=4, intermediate_size=256, ema_projection_size=16, bidirectional=True, shared_representation_size=64, use_chunking=False, chunk_size=-1, truncation=None, normalize_before_mega=True, normalization_type='scalenorm', norm_affine=True, activation='silu', attention_activation='softmax', dropout_prob=0.1, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, use_feature_dropout=False, use_normalized_ffn=True, nffn_hidden_size=256, normalize_before_ffn=True, nffn_activation_dropout_prob=0.1, max_positions=2048, add_token_type_embeddings=False, type_vocab_size=2, initializer_range=0.02, ema_delta_alpha_range=0.2, ema_beta_range=0.02, ema_gamma_omega_range=1.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, relative_positional_bias='rotary', classifier_dropout=None, use_cache=True, add_lm_hidden_dense_layer=True, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.activation = activation self.attention_activation = attention_activation self.intermediate_size = intermediate_size self.ema_projection_size = ema_projection_size self.bidirectional = bidirectional self.shared_representation_size = shared_representation_size self.use_chunking = use_chunking self.chunk_size = chunk_size self.truncation = truncation self.normalize_before_mega = normalize_before_mega self.normalization_type = normalization_type self.norm_affine = norm_affine self.dropout_prob = dropout_prob self.hidden_dropout_prob = hidden_dropout_prob 
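# A hedged sketch of instantiating the configuration defined in this __init__; it
# assumes a transformers version that still ships the deprecated Mega model, and the
# hyper-parameter values are illustrative rather than recommended.
from transformers import MegaConfig, MegaModel

config = MegaConfig(
    vocab_size=30522,
    hidden_size=128,
    num_hidden_layers=4,
    use_chunking=True,            # enable chunk-wise attention
    chunk_size=128,               # chunk length used when use_chunking is True
    relative_positional_bias="rotary",
)
model = MegaModel(config)
# Note that `num_attention_heads` is hard-coded to 1 further down in this __init__:
# Mega uses a single-head gated attention variant.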
self.attention_probs_dropout_prob = attention_probs_dropout_prob self.use_feature_dropout = use_feature_dropout self.use_normalized_ffn = use_normalized_ffn self.nffn_hidden_size = nffn_hidden_size self.normalize_before_ffn = normalize_before_ffn self.nffn_activation_dropout_prob = nffn_activation_dropout_prob self.max_positions = max_positions self.add_token_type_embeddings = add_token_type_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.ema_delta_alpha_range = ema_delta_alpha_range self.ema_beta_range = ema_beta_range self.ema_gamma_omega_range = ema_gamma_omega_range self.relative_positional_bias = relative_positional_bias self.use_cache = use_cache self.classifier_dropout = classifier_dropout self.add_lm_hidden_dense_layer = add_lm_hidden_dense_layer self.num_attention_heads = 1 class MegaOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)]) # File: transformers-main/src/transformers/models/deprecated/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py """""" import argparse import os import pickle as pkl import torch from torch import nn from transformers import AutoTokenizer, MegaConfig, MegaForMaskedLM try: from fairseq.modules.mega_layer import MegaEncoderLayer except ImportError: raise ImportError('You need to install the version of fairseq from the Mega repo!') class MegaLM(nn.Module): def __init__(self, mega_args, depth, vocab_size): super().__init__() self.mega_args = mega_args self.embedding_layer = nn.Embedding(vocab_size, self.mega_args.encoder_embed_dim) self.encoders = nn.ModuleList([MegaEncoderLayer(self.mega_args) for _ in range(depth)]) self.depth = depth def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0): if batch_first: input_ids = input_ids.T if ignore_mask_value == 0: attention_mask = 1 - attention_mask embeds = self.embedding_layer(input_ids) for encoder in self.encoders: embeds = encoder(embeds, attention_mask) if batch_first: return torch.transpose(embeds, 0, 1) else: return embeds class OriginalMegaForMaskedLM(nn.Module): def __init__(self, mega_args, depth, vocab_size): super().__init__() self.mega = MegaLM(mega_args, depth, vocab_size) self.mlm_head = nn.Linear(mega_args.encoder_embed_dim, vocab_size) self.dropout = nn.Dropout(p=0.1) def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0): encoder_output = self.mega(input_ids, attention_mask, batch_first, ignore_mask_value) return self.mlm_head(self.dropout(encoder_output)) def convert_checkpoint_to_huggingface(pretrained_checkpoint_path, output_path, includes_tokenizer): with open(os.path.join(pretrained_checkpoint_path, 'model_args.pkl'), 'rb') as f: mega_original_args = pkl.load(f) original_mlm = OriginalMegaForMaskedLM(**mega_original_args).eval() print('Original Mega encoder:', original_mlm.mega.load_state_dict(torch.load(os.path.join(pretrained_checkpoint_path, 'encoder_weights.pt'), map_location='cpu'))) print('Original Mega MLM layer:', original_mlm.mlm_head.load_state_dict(torch.load(os.path.join(pretrained_checkpoint_path, 'mlm_head_weights.pt'), map_location='cpu'))) hf_config = MegaConfig(num_hidden_layers=mega_original_args['depth'], vocab_size=mega_original_args['vocab_size'], 
hidden_size=mega_original_args['mega_args'].encoder_embed_dim, shared_representation_size=mega_original_args['mega_args'].encoder_z_dim, intermediate_size=mega_original_args['mega_args'].encoder_hidden_dim, ema_projection_size=mega_original_args['mega_args'].encoder_n_dim, dropout_prob=mega_original_args['mega_args'].dropout, attention_probs_dropout_prob=mega_original_args['mega_args'].attention_dropout, hidden_dropout_prob=mega_original_args['mega_args'].hidden_dropout, activation=mega_original_args['mega_args'].activation_fn, attention_activation=mega_original_args['mega_args'].attention_activation_fn, bidirectional=mega_original_args['mega_args'].bidirectional, use_chunking=mega_original_args['mega_args'].encoder_chunk_size > 0, chunk_size=mega_original_args['mega_args'].encoder_chunk_size, truncation=mega_original_args['mega_args'].truncation_length, normalization_type=mega_original_args['mega_args'].normalization_type, normalize_before_mega=True, norm_affine=True, use_feature_dropout=mega_original_args['mega_args'].feature_dropout, relative_positional_bias=mega_original_args['mega_args'].rel_pos_bias, max_positions=mega_original_args['mega_args'].max_source_positions, nffn_hidden_size=mega_original_args['mega_args'].encoder_ffn_embed_dim, normalize_before_ffn=mega_original_args['mega_args'].normalize_before, nffn_activation_dropout_prob=0.0, add_token_type_embeddings=False, add_lm_hidden_dense_layer=False) hf_mlm = MegaForMaskedLM(hf_config).eval() hf_mlm.mega.embedding_layer.word_embeddings.weight = original_mlm.mega.embedding_layer.weight original_state_dict = original_mlm.mega.encoders.state_dict() updated_keys = {} for module_name in original_state_dict.keys(): new_module_name = None if 'beta' in module_name: if 'move.beta' in module_name: new_module_name = module_name.replace('move.beta', 'ema_gate.ema_expansion_matrix') elif 'mega_layer.beta' in module_name: new_module_name = module_name.replace('beta', 'qk_bias') else: new_module_name = module_name.replace('beta', 'b_param') elif 'gamma' in module_name: if 'move.gamma' in module_name: new_module_name = module_name.replace('move.gamma', 'ema_gate.kernel_projection_matrix') elif 'mega_layer.gamma' in module_name: new_module_name = module_name.replace('gamma', 'qk_weight') else: new_module_name = module_name.replace('gamma', 'g_param') elif 'move.alpha' in module_name: new_module_name = module_name.replace('move.alpha', 'ema_gate.decay_factor') elif 'move.delta' in module_name: new_module_name = module_name.replace('move.delta', 'ema_gate.damping_factor') elif 'omega' in module_name: new_module_name = module_name.replace('move.omega', 'ema_gate.residual_weight') if new_module_name: updated_keys[module_name] = new_module_name if len(updated_keys) != 0: print(f'Renaming these keys: {updated_keys.keys()}') else: print('No need to rename state dict entries') for (old, new) in updated_keys.items(): original_state_dict[new] = original_state_dict.pop(old) print('HF Mega encoder:', hf_mlm.mega.layers.load_state_dict(original_state_dict)) print('HF Mega MLM layer:', hf_mlm.mlm_head.load_state_dict(torch.load(os.path.join(pretrained_checkpoint_path, 'mlm_head_weights.pt'), map_location='cpu'))) input_ids = torch.randint(0, hf_config.vocab_size, size=(4, 256)) input_mask = torch.ones_like(input_ids) input_mask[:, -10:] = 0 original_output = original_mlm(input_ids, input_mask, batch_first=True, ignore_mask_value=0) hf_output = hf_mlm(input_ids, input_mask)[0] print(f'original output {original_output.shape}') print(f'hf output 
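# NOTE: summary of the parameter renaming performed by this conversion script:
#   move.beta  -> ema_gate.ema_expansion_matrix    move.gamma -> ema_gate.kernel_projection_matrix
#   move.alpha -> ema_gate.decay_factor            move.delta -> ema_gate.damping_factor
#   move.omega -> ema_gate.residual_weight
#   mega_layer.beta -> qk_bias                     mega_layer.gamma -> qk_weight
#   remaining beta  -> b_param                     remaining gamma  -> g_param
# The renamed fairseq state dict is loaded into `hf_mlm.mega.layers`, and a forward
# pass on random inputs is compared against the original model with
# `torch.allclose(..., atol=1e-3)` before saving.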
{hf_output.shape}') print(f'max diff: {(original_output - hf_output).max()}') success = torch.allclose(original_output, hf_output, atol=0.001) if success: print('Yay!') hf_mlm.save_pretrained(output_path) else: raise RuntimeError(f"Something's broken :(\nOriginal:\n{original_output}\n\nHF\n{hf_output}\n{hf_mlm}") if includes_tokenizer: print('Transferring tokenizer') tokenizer = AutoTokenizer.from_pretrained(pretrained_checkpoint_path) tokenizer.save_pretrained(output_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pretrained_checkpoint_path', default=None, type=str, required=True, help='Point to the directory containing your model weights using the official Mega repo') parser.add_argument('--output_path', default=None, type=str, required=True, help='Location to save the Hugging Face version') parser.add_argument('--includes_tokenizer', action='store_true', help='Use this flag if there is a Hugging Face tokenizer in the original checkpoint repo') args = parser.parse_args() convert_checkpoint_to_huggingface(args.pretrained_checkpoint_path, args.output_path, args.includes_tokenizer) # File: transformers-main/src/transformers/models/deprecated/mega/modeling_mega.py """""" import math from typing import List, Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....activations import ACT2FN from ....modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ....modeling_utils import PreTrainedModel from ....pytorch_utils import ALL_LAYERNORM_LAYERS from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_mega import MegaConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'mnaylor/mega-base-wikitext' _CONFIG_FOR_DOC = 'MegaConfig' class MegaEmbeddings(nn.Module): def __init__(self, config: MegaConfig): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.use_token_types = config.add_token_type_embeddings if self.use_token_types: self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.register_buffer('token_type_ids', torch.zeros(config.max_positions, dtype=torch.long).expand((1, -1)), persistent=False) self.padding_idx = config.pad_token_id def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None): if input_ids is None and inputs_embeds is None: raise ValueError('Must provide one of input_ids or inputs_embeds') elif input_ids is not None: input_shape = input_ids.size() device = input_ids.device inputs_embeds = self.word_embeddings(input_ids) else: input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device if self.use_token_types: if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :input_shape[1]] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], input_shape[1]) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + 
token_type_embeddings else: embeddings = inputs_embeds return embeddings class MegaSimpleRelativePositionalBias(nn.Module): def __init__(self, config: MegaConfig): super().__init__() self.config = config self.max_positions = self.config.max_positions if self.config.chunk_size < 0 else self.config.chunk_size self.rel_pos_bias = nn.Parameter(torch.Tensor(2 * config.max_positions - 1)) def forward(self, seq_len): if seq_len > self.max_positions: raise ValueError('Sequence length {} going beyond max length {}'.format(seq_len, self.max_positions)) bias = self.rel_pos_bias[self.max_positions - seq_len:self.max_positions + seq_len - 1] tile = F.pad(bias, (0, seq_len)) tile = torch.tile(tile, (seq_len,)) tile = tile[:-seq_len] tile = tile.view(seq_len, 3 * seq_len - 2) start = (2 * seq_len - 1) // 2 end = tile.size(1) - start tile = tile[:, start:end] return tile class MegaRotaryRelativePositionalBias(nn.Module): def __init__(self, config: MegaConfig): super().__init__() if config.hidden_size % 2 != 0: raise RuntimeError('Rotary positional bias requires `hidden_size` to be a multiple of 2') self.config = config self.embed_dim = config.shared_representation_size self.max_positions = self.config.max_positions if self.config.chunk_size < 0 else self.config.chunk_size (self.sine, self.cosine) = MegaRotaryRelativePositionalBias.get_sinusoid_embeddings(config.max_positions, self.embed_dim) self.alpha = nn.Parameter(torch.Tensor(1, self.embed_dim)) self.b_param = nn.Parameter(torch.Tensor(1, self.embed_dim)) self.register_buffer('_float_tensor', torch.FloatTensor([0.0])) @staticmethod def get_sinusoid_embeddings(max_positions: int, embedding_dim: int): half_dim = embedding_dim // 2 emb = math.log(10000) / half_dim emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(max_positions, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) return (torch.sin(emb), torch.cos(emb)) def rotary(self, input): (seq_len, embed_dim) = input.size() (chunk_1, chunk_2) = torch.chunk(input, 2, dim=-1) if self.sine is None or seq_len > self.sine.size(0): (self.sine, self.cosine) = MegaRotaryRelativePositionalBias.get_sinusoid_embeddings(seq_len, embed_dim) self.max_positions = seq_len self.sine = self.sine.to(self._float_tensor) self.cosine = self.cosine.to(self._float_tensor) sin = self.sine[:seq_len] cos = self.cosine[:seq_len] return torch.cat([chunk_1 * cos - chunk_2 * sin, chunk_2 * cos + chunk_1 * sin], dim=1) def forward(self, seq_len): rotary_alpha = self.rotary(self.alpha.expand(seq_len, self.embed_dim)) rotary_beta = self.rotary(self.b_param.expand(seq_len, self.embed_dim)) bias = torch.einsum('mk,nk->mn', rotary_alpha, rotary_beta) return bias class MegaDropout(nn.Module): def __init__(self, dropout_probability, is_featurewise=False): super().__init__() self.dropout_probability = dropout_probability self.is_featurewise = is_featurewise def forward(self, input, batch_first: bool=False): if self.is_featurewise: if batch_first: return F.dropout2d(input.transpose(-1, -2), p=self.dropout_probability, training=self.training).transpose(-1, -2) else: if input.dim() != 3: raise ValueError('Feature dropout inputs must be exactly 3-dimensional if inputs are ordered [sequence length, batch size, hidden dimension]') return F.dropout2d(input.permute(1, 2, 0), p=self.dropout_probability, training=self.training).permute(2, 0, 1) else: return F.dropout(input, p=self.dropout_probability, training=self.training) class MegaRMSNorm(nn.Module): def __init__(self, number_features, eps=1e-06, 
affine=True): super().__init__() self.num_features = number_features self.eps = eps self.affine = affine if affine: self.weight = nn.Parameter(torch.Tensor(self.num_features)) else: self.register_parameter('weight', None) def forward(self, input): mean_square = torch.mean(torch.square(input), dim=-1, keepdim=True) if self.weight is not None: input = input * self.weight input = input * torch.rsqrt(mean_square + self.eps) return input def extra_repr(self): return f'{self.num_features}, eps={self.eps}, affine={self.affine}' class MegaScaleNorm(nn.Module): def __init__(self, dim, eps=1e-06, affine=True): super().__init__() self.dim = dim self.eps = eps self.affine = affine if affine: self.scalar = nn.Parameter(torch.Tensor(1)) else: self.register_parameter('scalar', None) def forward(self, input): mean_square = torch.mean(torch.square(input), dim=self.dim, keepdim=True) if self.scalar is not None: input = self.scalar * input output = input * torch.rsqrt(mean_square + self.eps) return output class MegaSequenceNorm(nn.Module): def __init__(self, norm_type, embedding_dim, eps=1e-05, affine=True, export=False): super().__init__() if norm_type == 'layernorm': self.norm = nn.LayerNorm(embedding_dim, eps, elementwise_affine=affine) elif norm_type == 'scalenorm': self.norm = MegaScaleNorm(dim=-1, eps=eps, affine=affine) elif norm_type == 'rmsnorm': self.norm = MegaRMSNorm(embedding_dim, eps=eps, affine=affine) elif norm_type == 'batchnorm': self.norm = nn.BatchNorm1d(embedding_dim, eps=eps, affine=affine) elif norm_type == 'syncbatchnorm': self.norm = nn.SyncBatchNorm(embedding_dim, eps=eps, affine=affine) else: raise ValueError('Unknown norm type: {}'.format(norm_type)) def forward(self, input): if isinstance(self.norm, nn.modules.batchnorm._BatchNorm): if input.dim() != 3: raise ValueError('BatchNorm inputs must be exactly 3-dimensional') input = input.permute(1, 2, 0) input = self.norm(input) return input.permute(2, 0, 1) else: return self.norm(input) ALL_LAYERNORM_LAYERS.append(MegaSequenceNorm) class MegaMultiDimensionDampedEma(nn.Module): def __init__(self, config: MegaConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.ndim = config.ema_projection_size self.bidirectional = config.bidirectional self.truncation = config.truncation self.scale = math.sqrt(1.0 / self.ndim) kernel_dim = 2 * config.hidden_size if self.bidirectional else config.hidden_size self.damping_factor = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1)) self.decay_factor = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1)) self.ema_expansion_matrix = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1)) self.kernel_projection_matrix = nn.Parameter(torch.Tensor(kernel_dim, self.ndim)) self.residual_weight = nn.Parameter(torch.Tensor(config.hidden_size)) self._kernel = None self._coeffs = None def _compute_ema_coefficients(self): self._coeffs = None damping_factor = torch.sigmoid(self.damping_factor) decay_factor = torch.sigmoid(self.decay_factor) previous_timestep_weight = 1.0 - damping_factor * decay_factor return (damping_factor, previous_timestep_weight) def _compute_efficient_ema_kernel(self, length: int): self._kernel = None (damping_factor, previous_timestep_weight) = self._compute_ema_coefficients() vander = torch.arange(length).to(damping_factor).view(1, 1, length) * torch.log(previous_timestep_weight) kernel = damping_factor * self.ema_expansion_matrix * torch.exp(vander) return torch.einsum('dnl,dn->dl', kernel, self.kernel_projection_matrix * self.scale)
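# Illustrative sketch (kept as comments, toy shapes): the closed-form kernel returned by
# _compute_efficient_ema_kernel is equivalent to stepping the damped EMA recurrence used in
# ema_step/one_ema_step. The names p, q, beta, gamma and the single-channel setup below are
# simplifying assumptions for illustration, not the module's real tensors.
#
#     import torch
#     torch.manual_seed(0)
#     length, ndim = 8, 3
#     p = torch.rand(ndim, 1)                    # damping factor (post-sigmoid)
#     q = 1.0 - p * torch.rand(ndim, 1)          # previous-timestep weight
#     beta = torch.randn(ndim, 1)                # ema_expansion_matrix
#     gamma = torch.randn(ndim)                  # kernel_projection_matrix (scale folded in)
#     x = torch.randn(length)
#     # closed-form kernel: k[l] = sum_n p_n * beta_n * q_n**l * gamma_n
#     vander = torch.arange(length).view(1, length) * torch.log(q)
#     kernel = (p * beta * torch.exp(vander) * gamma.unsqueeze(-1)).sum(dim=0)
#     # causal convolution with that kernel (what fft_convolution computes, done directly here)
#     y_conv = torch.stack([(kernel[: t + 1].flip(0) * x[: t + 1]).sum() for t in range(length)])
#     # explicit recurrence: h_t = q * h_{t-1} + p * beta * x_t ;  y_t = gamma . h_t
#     h, y_rec = torch.zeros(ndim), []
#     for t in range(length):
#         h = q.squeeze(-1) * h + (p * beta).squeeze(-1) * x[t]
#         y_rec.append((gamma * h).sum())
#     assert torch.allclose(y_conv, torch.stack(y_rec), atol=1e-5)
def get_ema_coefficients(self):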
if self.training: return self._compute_ema_coefficients() else: if self._coeffs is None: self._coeffs = self._compute_ema_coefficients() return self._coeffs def get_ema_kernel(self, length: int): kernel_size = length if self.truncation is None else min(self.truncation, length) if self.training: return self._compute_efficient_ema_kernel(kernel_size) else: if self._kernel is None or self._kernel.size(-1) < kernel_size: self._kernel = self._compute_efficient_ema_kernel(kernel_size) return self._kernel[..., :kernel_size] def fft_convolution(self, inputs, kernel, length): inputs_fft = torch.fft.rfft(inputs.float(), n=2 * length) kernel_fft = torch.fft.rfft(kernel.float(), n=2 * length) convolved_sequence = torch.fft.irfft(inputs_fft * kernel_fft, n=2 * length) return convolved_sequence def ema_step(self, inputs, length, past_state=None): if length == 1: return self.one_ema_step(inputs, past_state=past_state) (damping_factor, previous_timestep_weight) = self.get_ema_coefficients() vander = torch.arange(length + 1).to(damping_factor).view(1, 1, length + 1) * torch.log(previous_timestep_weight) vander = torch.exp(vander) if past_state is not None: past_ema_proj = vander[:, :, 1:] * (self.kernel_projection_matrix * self.scale).unsqueeze(-1) past_ema_state = torch.einsum('bdn,dnl->bdl', past_state, past_ema_proj) past_vandermonde = vander[:, :, -1] * past_state else: past_ema_state = None past_vandermonde = None vander = vander[:, :, :-1] kernel = damping_factor * self.ema_expansion_matrix * vander kernel_proj = torch.einsum('dnl,dn->dl', kernel, self.kernel_projection_matrix * self.scale) ema_output = self.fft_convolution(inputs, kernel_proj, length=length)[..., 0:length] ema_output = ema_output.type_as(inputs) if past_ema_state is not None: ema_output = ema_output + past_ema_state updated_hidden_state = torch.einsum('bdl,dnl->bdn', inputs, torch.flip(kernel, dims=[2])) if past_vandermonde is not None: updated_hidden_state = updated_hidden_state + past_vandermonde return (ema_output.permute(2, 0, 1), updated_hidden_state) def one_ema_step(self, inputs, past_state=None): (damping_factor, previous_timestep_weight) = self.get_ema_coefficients() updated_state = (damping_factor * self.ema_expansion_matrix).squeeze(-1) * inputs if past_state is not None: updated_state = updated_state + previous_timestep_weight.squeeze(-1) * past_state out = torch.einsum('bdn,dn->bd', updated_state, self.kernel_projection_matrix * self.scale) return (out.unsqueeze(0), updated_state) def forward(self, inputs, attention_mask: Optional[torch.Tensor]=None, prev_state: Optional[torch.Tensor]=None, use_cache: bool=False) -> torch.Tensor: (seq_len, bsz, embed_dim) = inputs.size() if embed_dim != self.embed_dim: raise ValueError(f'Unexpected embedding dimension received: input is {embed_dim}, model expects {self.embed_dim}') residual = inputs * self.residual_weight inputs = inputs.permute(1, 2, 0) if attention_mask is not None: inputs = inputs * attention_mask.unsqueeze(1).type_as(inputs) if self.bidirectional and use_cache: raise RuntimeError('Bidirectional EMA does not support incremental state') if use_cache: (out, updated_state) = self.ema_step(inputs, seq_len, past_state=prev_state) out = F.silu(out + residual) return (out, updated_state) else: kernel = self.get_ema_kernel(seq_len) fft_len = seq_len s_index = 0 kernel_size = kernel.size(1) if self.bidirectional: (k1, k2) = torch.split(kernel, [self.embed_dim, self.embed_dim], dim=0) kernel = F.pad(k1, (kernel_size - 1, 0)) + F.pad(k2.flip(-1), (0, kernel_size - 1)) inputs = 
F.pad(inputs, (kernel_size - 1, 0)) fft_len = fft_len + kernel_size - 1 s_index = 2 * kernel_size - 2 ema_output = self.fft_convolution(inputs, kernel, length=fft_len)[..., s_index:s_index + seq_len] ema_output = ema_output.type_as(inputs) gated_ema_output = F.silu(ema_output.permute(2, 0, 1) + residual) return (gated_ema_output, None) class MegaGatedCrossAttention(nn.Module): def __init__(self, config: MegaConfig): super().__init__() self.config = config self.activation = ACT2FN[self.config.activation] self.attention_activation = self.config.attention_activation self.scaling = self.config.shared_representation_size ** (-0.5) if self.attention_activation == 'softmax' else None self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout) self.hidden_dropout = MegaDropout(self.config.hidden_dropout_prob, is_featurewise=self.config.use_feature_dropout) self.attention_dropout = MegaDropout(self.config.attention_probs_dropout_prob, is_featurewise=False) self.prenorm = self.config.normalize_before_mega self.norm = MegaSequenceNorm(self.config.normalization_type, self.config.hidden_size, affine=self.config.norm_affine) self.k_proj = nn.Linear(self.config.hidden_size, self.config.shared_representation_size) self.v_proj = nn.Linear(self.config.hidden_size, self.config.hidden_size) self.q_proj = nn.Linear(self.config.hidden_size, 2 * self.config.hidden_size + self.config.shared_representation_size) self.h_proj = nn.Linear(self.config.hidden_size, self.config.hidden_size) if self.config.relative_positional_bias == 'simple': self.rel_pos_bias = MegaSimpleRelativePositionalBias(config) elif self.config.relative_positional_bias == 'rotary': self.rel_pos_bias = MegaRotaryRelativePositionalBias(config) else: raise ValueError('unknown relative position bias: {}'.format(self.config.relative_positional_bias)) self.softmax = nn.Softmax(dim=-1) def element_attention(self, query, key, key_padding_mask, pidx): (bsz, src_len, _) = key.size() tgt_len = query.size(1) if pidx is None else pidx + 1 if key_padding_mask is not None: lengths = key_padding_mask.sum(dim=-1).view(bsz, 1, 1) else: lengths = src_len bias = self.rel_pos_bias(max(tgt_len, src_len))[:, :src_len] if pidx is not None: if query.size(1) != 1: raise ValueError('Position offset provided with queries longer than 1 token') bias = bias[pidx] else: bias = bias[:tgt_len] qk = torch.bmm(query, key.transpose(1, 2)) / lengths + bias attn_weights = ACT2FN[self.attention_activation](qk).type_as(qk) if key_padding_mask is not None: attn_weights = attn_weights * key_padding_mask.unsqueeze(1) return attn_weights def softmax_attention(self, query, key, key_padding_mask, pidx): (bsz, src_len, _) = key.size() tgt_len = query.size(1) if pidx is None else pidx + 1 bias = self.rel_pos_bias(max(tgt_len, src_len))[:, :src_len] if pidx is not None: if query.size(1) != 1: raise ValueError('Position offset provided with queries longer than 1 token') bias = bias[pidx] else: bias = bias[:tgt_len] query = query * self.scaling qk = torch.bmm(query, key.transpose(1, 2)) + bias if key_padding_mask is not None: qk = qk.masked_fill((1 - key_padding_mask).unsqueeze(1).to(torch.bool), float('-inf')) attn_weights = self.softmax(qk).type_as(qk) return attn_weights def forward(self, query, key: Optional[torch.Tensor], value: Optional[torch.Tensor], key_padding_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[torch.Tensor]]=None, output_attentions: bool=False, use_cache: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: 
(seq_len, bsz, embed_dim) = query.size() if embed_dim != self.config.hidden_size: raise ValueError(f'Unexpected embedding dimension received: input is {embed_dim} but expected {self.config.hidden_size}') if past_key_values is not None: if seq_len != 1: raise ValueError(f'Incremental decoding requested with self-sequence length > 1: {seq_len}') (prev_cross_key, prev_cross_value) = past_key_values[-2:] key = value = None prev_self_key = past_key_values[0] num_incremental_steps = prev_self_key.size(1) + 1 else: prev_cross_key = prev_cross_value = None num_incremental_steps = 0 if use_cache and seq_len == 1 else None full_query = query if self.prenorm: full_query = self.norm(full_query) query_projected = self.q_proj(full_query) (residual_weight, target_gate, attention_query) = torch.split(query_projected, [self.config.hidden_size, self.config.hidden_size, self.config.shared_representation_size], dim=-1) residual_weight = torch.sigmoid(residual_weight) target_gate = F.silu(target_gate) if key is None: if value is not None: raise ValueError('Key and value must be `None` simultaneously') projected_key = projected_value = None else: projected_key = self.k_proj(key) projected_value = self.activation(self.v_proj(key)) attention_query = attention_query.transpose(0, 1) if projected_key is not None: projected_key = projected_key.transpose(0, 1) if projected_value is not None: projected_value = projected_value.transpose(0, 1) if past_key_values is not None: projected_key = prev_cross_key projected_value = prev_cross_value if use_cache: updated_cross_key = projected_key updated_cross_value = projected_value ctx_len = projected_key.size(1) if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: if key_padding_mask.size(0) != bsz: raise ValueError('Key padding mask does not align on the batch dimension') if key_padding_mask.size(1) != ctx_len: raise ValueError('Key padding mask does not align on the sequence length dimension') if self.attention_activation == 'softmax': attn_weights = self.softmax_attention(attention_query, projected_key, key_padding_mask, num_incremental_steps) else: attn_weights = self.element_attention(attention_query, projected_key, key_padding_mask, num_incremental_steps) projected_value = self.hidden_dropout(projected_value, batch_first=True) kernel = self.attention_dropout(attn_weights) weighted_targets = torch.bmm(kernel, projected_value).transpose(0, 1) weighted_targets = self.activation(self.h_proj(weighted_targets * target_gate)) weighted_targets = self.dropout(weighted_targets) out = torch.addcmul(query, residual_weight, weighted_targets - query) if not self.prenorm: out = self.norm(out) outputs = (out, attn_weights) if output_attentions else (out,) if use_cache: outputs = outputs + (updated_cross_key, updated_cross_value) return outputs class MegaMovingAverageGatedAttention(nn.Module): def __init__(self, config: MegaConfig): super().__init__() self.config = config self.activation = ACT2FN[self.config.activation] self.scaling = self.config.shared_representation_size ** (-0.5) if self.config.attention_activation == 'softmax' else None self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout) self.hidden_dropout = MegaDropout(self.config.hidden_dropout_prob, is_featurewise=self.config.use_feature_dropout) self.attention_dropout = MegaDropout(self.config.attention_probs_dropout_prob, is_featurewise=False) self.norm = MegaSequenceNorm(self.config.normalization_type, 
self.config.hidden_size, affine=self.config.norm_affine) self.ema_gate = MegaMultiDimensionDampedEma(config) self.v_proj = nn.Linear(self.config.hidden_size, self.config.intermediate_size) self.mx_proj = nn.Linear(self.config.hidden_size, self.config.shared_representation_size + self.config.intermediate_size + 2 * self.config.hidden_size) self.h_proj = nn.Linear(self.config.intermediate_size, self.config.hidden_size) self.qk_weight = nn.Parameter(torch.Tensor(2, self.config.shared_representation_size)) self.qk_bias = nn.Parameter(torch.Tensor(2, self.config.shared_representation_size)) if self.config.relative_positional_bias == 'simple': self.rel_pos_bias = MegaSimpleRelativePositionalBias(config) elif self.config.relative_positional_bias == 'rotary': self.rel_pos_bias = MegaRotaryRelativePositionalBias(config) else: raise ValueError(f'Unknown relative positional bias: {self.config.relative_positional_bias}') self.softmax = nn.Softmax(dim=-1) self.attention_function = self.softmax_attention if self.config.attention_activation == 'softmax' else self.element_attention def element_attention(self, query, key, padding_mask, causal_mask): seq_len = key.size(2) if padding_mask is not None: lengths = padding_mask.sum(-1, keepdim=True) lengths = lengths.clamp(min=1.0).unsqueeze(-1) else: lengths = seq_len if causal_mask is not None: lengths = causal_mask.sum(dim=-1, keepdim=True) bias = self.rel_pos_bias(seq_len) if seq_len != query.size(2): if query.size(2) != 1: raise ValueError('Size mismatch between Q and K in element attention') bias = bias[-1:] qk = torch.matmul(query, key.transpose(2, 3)) / lengths + bias attn_weights = ACT2FN[self.config.attention_activation](qk).type_as(qk) if padding_mask is not None: attn_weights = attn_weights * padding_mask.unsqueeze(2) if causal_mask is not None: attn_weights = attn_weights * causal_mask return attn_weights def softmax_attention(self, query, key, padding_mask, causal_mask): seq_len = key.size(2) bias = self.rel_pos_bias(seq_len) if seq_len != query.size(2): if query.size(2) != 1: raise ValueError('Size mismatch between Q and K in softmax attention') bias = bias[-1:] query = query * self.scaling qk = torch.matmul(query, key.transpose(2, 3)) + bias if causal_mask is not None: additive_causal_mask = torch.zeros_like(causal_mask, dtype=qk.dtype) additive_causal_mask = additive_causal_mask.masked_fill((1 - causal_mask).bool(), float('-inf')) qk = qk + additive_causal_mask if padding_mask is not None: padding_mask = 1 - padding_mask padding_mask_all = padding_mask.all(dim=-1, keepdim=True) padding_mask = torch.logical_and(padding_mask, ~padding_mask_all) qk = qk.masked_fill(padding_mask.unsqueeze(2).to(torch.bool), float('-inf')) attn_weights = self.softmax(qk).type_as(qk) return attn_weights def forward(self, input, padding_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[torch.Tensor]]=None, output_attentions=False, use_cache=False): (seq_len, bsz, embed_dim) = input.size() if embed_dim != self.config.hidden_size: raise ValueError(f'Input embedding dimension should be {self.config.hidden_size}; received {embed_dim}') residual = input if self.config.normalize_before_mega: input = self.norm(input) value = self.activation(self.v_proj(input)) if self.config.is_decoder and past_key_values is not None: if seq_len > 1: raise ValueError(f'Incremental decoding only supports self sequence length of 1; received {seq_len}') (prev_self_key, prev_self_value, prev_ema_state) = past_key_values[0:3] else: 
prev_self_key = prev_self_value = prev_ema_state = None (ema_out, updated_ema_state) = self.ema_gate(input, attention_mask=padding_mask, prev_state=prev_ema_state, use_cache=use_cache) ema_out = self.dropout(ema_out) base = self.mx_proj(ema_out) (residual_weight, query_key_gates, intermediate_state) = torch.split(base, [self.config.hidden_size, self.config.shared_representation_size + self.config.intermediate_size, self.config.hidden_size], dim=-1) residual_weight = torch.sigmoid(residual_weight) query_key_gates = F.silu(query_key_gates) (query_key, attention_gate) = torch.split(query_key_gates, [self.config.shared_representation_size, self.config.intermediate_size], dim=-1) query_key = query_key.unsqueeze(2) * self.qk_weight + self.qk_bias (query, key) = torch.unbind(query_key, dim=2) query = query.transpose(0, 1) key = key.transpose(0, 1) value = value.transpose(0, 1) if self.config.is_decoder: if prev_self_key is not None: key = torch.cat([prev_self_key, key], dim=1) if prev_self_value is not None: value = torch.cat([prev_self_value, value], dim=1) if not self.config.use_chunking: updated_self_key = key updated_self_value = value else: curr_len = key.size(1) % self.config.chunk_size if curr_len == 0: updated_self_key = None updated_self_value = None else: updated_self_key = key updated_self_value = value ctx_len = key.size(1) if not self.config.use_chunking: query = query.unsqueeze(1) key = key.unsqueeze(1) value = value.unsqueeze(1) if padding_mask is not None: padding_mask = padding_mask.unsqueeze(1) else: if seq_len < self.config.chunk_size: query = query.unsqueeze(1) else: n_chunks = seq_len // self.config.chunk_size query = query.reshape(bsz, n_chunks, self.config.chunk_size, self.config.shared_representation_size) if ctx_len < self.config.chunk_size: key = key.unsqueeze(1) value = value.unsqueeze(1) if padding_mask is not None: padding_mask = padding_mask.unsqueeze(1) else: n_chunks = ctx_len // self.config.chunk_size key = key.reshape(bsz, n_chunks, self.config.chunk_size, self.config.shared_representation_size) value = value.reshape(bsz, n_chunks, self.config.chunk_size, self.config.intermediate_size) if padding_mask is not None: padding_mask = padding_mask.view(bsz, n_chunks, self.config.chunk_size) if padding_mask is not None and padding_mask.dim() == 0: padding_mask = None attn_weights = self.attention_function(query, key, padding_mask=padding_mask, causal_mask=causal_mask) value = self.hidden_dropout(value, batch_first=True) kernel = self.attention_dropout(attn_weights) weighted_self_output = torch.matmul(kernel, value).view(bsz, seq_len, self.config.intermediate_size).transpose(0, 1) weighted_self_output = self.activation(intermediate_state + self.h_proj(weighted_self_output * attention_gate)) weighted_self_output = self.dropout(weighted_self_output) out = torch.addcmul(residual, residual_weight, weighted_self_output - residual) if not self.config.normalize_before_mega: out = self.norm(out) return_values = (out, attn_weights) if output_attentions else (out,) if self.config.is_decoder: return_values = return_values + (updated_self_key, updated_self_value, updated_ema_state) return return_values class MegaNormalizedFeedForwardNetwork(nn.Module): def __init__(self, config: MegaConfig): super().__init__() self.config = config self.hidden_dim = config.nffn_hidden_size self.act_fn = config.activation self.activation = ACT2FN[config.activation] self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout) self.hidden_dropout = 
MegaDropout(self.config.nffn_activation_dropout_prob, is_featurewise=self.config.use_feature_dropout) self.prenorm = self.config.normalize_before_ffn self.norm = MegaSequenceNorm(self.config.normalization_type, self.config.hidden_size, affine=self.config.norm_affine) self.fc1 = nn.Linear(self.config.hidden_size, self.config.nffn_hidden_size) self.fc2 = nn.Linear(self.config.nffn_hidden_size, self.config.hidden_size) def forward(self, inputs): residual = inputs if self.prenorm: inputs = self.norm(inputs) hidden = self.activation(self.fc1(inputs)) hidden = self.hidden_dropout(hidden) output = self.fc2(hidden) output = self.dropout(output) output = output + residual if not self.prenorm: output = self.norm(output) return output class MegaBlock(nn.Module): def __init__(self, config: MegaConfig): super().__init__() self.seq_len_dim = 1 self.mega_layer = MegaMovingAverageGatedAttention(config) self.nffn = MegaNormalizedFeedForwardNetwork(config) if config.use_normalized_ffn else None self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.cross_attn = MegaGatedCrossAttention(config) else: self.cross_attn = None def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, causal_mask: Optional[torch.LongTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[torch.FloatTensor]]=None, output_attentions: Optional[bool]=False, use_cache: bool=False) -> Tuple[torch.Tensor]: if use_cache and past_key_value is not None and (attention_mask is not None): mega_padding_mask = attention_mask[:, -1].unsqueeze(-1) else: mega_padding_mask = attention_mask mega_outputs = self.mega_layer(input=hidden_states, padding_mask=mega_padding_mask, causal_mask=causal_mask, past_key_values=past_key_value, output_attentions=output_attentions, use_cache=use_cache) new_hidden_states = mega_outputs[0] (self_key, self_value, self_ema_state) = mega_outputs[-3:] if use_cache else (None, None, None) self_attention_weights = mega_outputs[1] if output_attentions else None if self.cross_attn is not None: if encoder_hidden_states is None: raise ValueError('Requested cross-attention without providing encoder hidden states') cross_attn_outputs = self.cross_attn(query=new_hidden_states, key=encoder_hidden_states, value=encoder_hidden_states, key_padding_mask=encoder_attention_mask, past_key_values=past_key_value, output_attentions=output_attentions, use_cache=use_cache) new_hidden_states = cross_attn_outputs[0] (cross_key, cross_value) = cross_attn_outputs[-2:] if use_cache else (None, None) cross_attention_weights = cross_attn_outputs[1] if output_attentions else None if self.nffn is not None: new_hidden_states = self.nffn(new_hidden_states) outs = (new_hidden_states,) if output_attentions: outs = outs + (self_attention_weights,) if self.cross_attn is not None: outs = outs + (cross_attention_weights,) if use_cache: new_key_values = (self_key, self_value, self_ema_state) if self.cross_attn is not None: new_key_values = new_key_values + (cross_key, cross_value) outs = outs + (new_key_values,) return outs class MegaPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> 
torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class MegaPreTrainedModel(PreTrainedModel): config_class = MegaConfig base_model_prefix = 'mega' supports_gradient_checkpointing = False _no_split_modules = ['MegaMovingAverageGatedAttention'] def _init_weights(self, module): if isinstance(module, MegaMultiDimensionDampedEma): with torch.no_grad(): nn.init.normal_(module.damping_factor, mean=0.0, std=self.config.ema_delta_alpha_range) nn.init.normal_(module.decay_factor, mean=0.0, std=self.config.ema_delta_alpha_range) val = torch.ones(self.config.ema_projection_size, 1) if self.config.ema_projection_size > 1: idx = torch.tensor(list(range(1, self.config.ema_projection_size, 2))) val.index_fill_(0, idx, -1.0) module.ema_expansion_matrix.normal_(mean=0.0, std=self.config.ema_beta_range).add_(val) nn.init.normal_(module.kernel_projection_matrix, mean=0.0, std=self.config.ema_gamma_omega_range) nn.init.normal_(module.residual_weight, mean=0.0, std=self.config.ema_gamma_omega_range) elif isinstance(module, MegaSimpleRelativePositionalBias): nn.init.normal_(module.rel_pos_bias, mean=0.0, std=self.config.initializer_range) elif isinstance(module, MegaRotaryRelativePositionalBias): nn.init.normal_(module.alpha, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.b_param, mean=0.0, std=self.config.initializer_range) elif isinstance(module, MegaScaleNorm): if self.config.norm_affine: nn.init.constant_(module.scalar, 1.0) elif isinstance(module, MegaRMSNorm): if self.config.norm_affine: nn.init.constant_(module.weight, 1.0) elif isinstance(module, MegaMovingAverageGatedAttention): nn.init.normal_(module.qk_weight, mean=0.0, std=self.config.initializer_range) nn.init.constant_(module.qk_bias, 0.0) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) MEGA_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`MegaConfig`]): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' MEGA_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n This parameter can only be used when the model is initialized with `add_token_type_embeddings` parameter\n set to `True`. All the value in this tensor should be always < config.type_vocab_size.\n\n [What are token type IDs?](../glossary#token-type-ids)\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare MEGA Model transformer outputting raw hidden-states without any specific head on top.', MEGA_START_DOCSTRING) class MegaModel(MegaPreTrainedModel): def __init__(self, config: MegaConfig, add_pooling_layer=True): super().__init__(config) self.config = config self.embedding_layer = MegaEmbeddings(config) self.layers = nn.ModuleList([MegaBlock(config) for _ in range(config.num_hidden_layers)]) self.pooler = MegaPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embedding_layer.word_embeddings def set_input_embeddings(self, value): self.embedding_layer.word_embeddings = value @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else 
self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() device = input_ids.device elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device else: raise ValueError('You have to specify either input_ids or inputs_embeds') if self.config.use_chunking: input_shape = torch.tensor([input_shape[0], self.config.chunk_size]) (batch_size, sequence_length) = input_shape if self.config.use_chunking and sequence_length > self.config.chunk_size: if sequence_length % self.config.chunk_size != 0: raise ValueError(f'config.use_chunking is activated; input sequence length must be shorter than or a multiple of config.chunk_size\nreceived sequence length of {sequence_length} with chunk size {self.config.chunk_size}') if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache temp_mask_for_extension = torch.ones((1, sequence_length), dtype=torch.long, device=device) causal_mask = self.create_extended_attention_mask_for_decoder(input_shape, temp_mask_for_extension) causal_mask = causal_mask.squeeze(0) else: use_cache = False causal_mask = None if past_key_values is not None and len(past_key_values) != self.config.num_hidden_layers: raise ValueError(f'Received past key/value cache with size mismatch; expected {self.config.num_hidden_layers}, received {len(past_key_values)}') embedding_output = self.embedding_layer(input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) hidden_states = embedding_output.transpose(0, 1) if encoder_hidden_states is not None: encoder_hidden_states = encoder_hidden_states.transpose(0, 1) all_hidden_states = (embedding_output,) if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for (i, mega_layer) in enumerate(self.layers): current_decoder_cache = past_key_values[i] if past_key_values is not None else None mega_outputs = mega_layer(hidden_states=hidden_states, attention_mask=attention_mask, causal_mask=causal_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=current_decoder_cache, output_attentions=output_attentions, use_cache=use_cache) hidden_states = mega_outputs[0] if output_hidden_states: all_hidden_states += (hidden_states.transpose(0, 1),) if output_attentions: self_attn_weights = mega_outputs[1] all_self_attentions += (self_attn_weights,) if self.config.add_cross_attention: cross_attn_weights = mega_outputs[2] all_cross_attentions += (cross_attn_weights,) if use_cache: updated_cache = mega_outputs[-1] next_decoder_cache += (updated_cache,) hidden_states = hidden_states.transpose(0, 1) pooled_output = self.pooler(hidden_states) if self.pooler is not None else None if not return_dict: return (hidden_states, pooled_output) + (all_hidden_states, next_decoder_cache, all_self_attentions, all_cross_attentions) return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=hidden_states, pooler_output=pooled_output, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) @add_start_docstrings('MEGA Model with 
a `language modeling` head on top for CLM fine-tuning.', MEGA_START_DOCSTRING) class MegaForCausalLM(MegaPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config: MegaConfig): super().__init__(config) if not config.is_decoder: logger.warning('If you want to use `MegaForCausalLM` as a standalone, add `is_decoder=True.`') self.mega = MegaModel(config, add_pooling_layer=False) if config.add_lm_hidden_dense_layer: self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.hidden_activation = nn.Tanh() else: self.dense = None self.hidden_activation = None self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Tuple[Tuple[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.mega(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] if self.dense is not None: sequence_output = self.dense(sequence_output) sequence_output = self.hidden_activation(sequence_output) prediction_scores = self.lm_head(sequence_output) lm_loss = None if labels is not None: shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) if past_key_values is not None: input_ids = input_ids[:, -1:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, 
beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past @add_start_docstrings('MEGA Model with a `language modeling` head on top.', MEGA_START_DOCSTRING) class MegaForMaskedLM(MegaPreTrainedModel): _tied_weights_keys = ['mlm_head.weight'] def __init__(self, config: MegaConfig): super().__init__(config) if config.is_decoder: logger.warning('If you want to use `MegaForMaskedLM`, set `config.is_decoder=False` for bi-directional self-attention.') self.mega = MegaModel(config, add_pooling_layer=False) if config.add_lm_hidden_dense_layer: self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.hidden_activation = nn.Tanh() else: self.dense = None self.hidden_activation = None self.mlm_head = nn.Linear(config.hidden_size, config.vocab_size) self.dropout = nn.Dropout(config.dropout_prob) self.post_init() def get_output_embeddings(self): return self.mlm_head def set_output_embeddings(self, new_embeddings): self.mlm_head = new_embeddings @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='<mask>', expected_output="' Paris'", expected_loss=0.1) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mega(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] if self.dense is not None: sequence_output = self.dense(sequence_output) sequence_output = self.hidden_activation(sequence_output) prediction_scores = self.mlm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
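# Illustrative usage sketch: a hypothetical helper (not defined elsewhere in this file) showing
# the fill-mask flow that the add_code_sample_docstrings decorator above documents. It assumes a
# transformers version that still ships the deprecated Mega classes and access to the
# 'mnaylor/mega-base-wikitext' checkpoint; the decoded token is illustrative, not guaranteed.
def _example_mega_fill_mask(prompt="The capital of France is <mask>."):
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
    model = MegaForMaskedLM.from_pretrained("mnaylor/mega-base-wikitext").eval()
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # locate the <mask> position(s) and take the highest-scoring vocabulary id at each
    mask_positions = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
    predicted_ids = logits[0, mask_positions].argmax(dim=-1)
    return tokenizer.decode(predicted_ids)

@add_start_docstrings('\n MEGA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. 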
for GLUE tasks.\n ', MEGA_START_DOCSTRING) class MegaForSequenceClassification(MegaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.mega = MegaModel(config, add_pooling_layer=False) self.classifier = MegaClassificationHead(config) self.post_init() @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mega(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n MEGA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', MEGA_START_DOCSTRING) class MegaForMultipleChoice(MegaPreTrainedModel): def __init__(self, config): super().__init__(config) self.mega = MegaModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.mega(flat_input_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n MEGA Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', MEGA_START_DOCSTRING) class MegaForTokenClassification(MegaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mega = MegaModel(config, add_pooling_layer=False) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mega(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class MegaClassificationHead(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings('\n MEGA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', MEGA_START_DOCSTRING) class MegaForQuestionAnswering(MegaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mega = MegaModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, 
end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mega(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/deprecated/mmbt/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_mmbt': ['MMBTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings'] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/mmbt/configuration_mmbt.py """""" from ....utils import logging logger = logging.get_logger(__name__) class MMBTConfig: def __init__(self, config, num_labels=None, modal_hidden_size=2048): self.__dict__ = config.__dict__ self.modal_hidden_size = modal_hidden_size if num_labels: self.num_labels = num_labels # File: transformers-main/src/transformers/models/deprecated/mmbt/modeling_mmbt.py """""" import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from ....modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput from ....modeling_utils import ModuleUtilsMixin from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'MMBTConfig' class ModalEmbeddings(nn.Module): def __init__(self, config, encoder, embeddings): super().__init__() self.config = config self.encoder = encoder self.proj_embeddings = 
nn.Linear(config.modal_hidden_size, config.hidden_size) self.position_embeddings = embeddings.position_embeddings self.token_type_embeddings = embeddings.token_type_embeddings self.word_embeddings = embeddings.word_embeddings self.LayerNorm = embeddings.LayerNorm self.dropout = nn.Dropout(p=config.hidden_dropout_prob) def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None): token_embeddings = self.proj_embeddings(self.encoder(input_modal)) seq_length = token_embeddings.size(1) if start_token is not None: start_token_embeds = self.word_embeddings(start_token) seq_length += 1 token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1) if end_token is not None: end_token_embeds = self.word_embeddings(end_token) seq_length += 1 token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1) if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device) position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length) if token_type_ids is None: token_type_ids = torch.zeros((input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = token_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings MMBT_START_DOCSTRING = "\n MMBT model was proposed in [Supervised Multimodal Bitransformers for Classifying Images and\n Text](https://github.com/facebookresearch/mmbt) by Douwe Kiela, Suvrat Bhooshan, Hamed Firooz, Davide Testuggine.\n It's a supervised multimodal bitransformer model that fuses information from text and other image encoders, and\n obtain state-of-the-art performance on various multimodal classification benchmark tasks.\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`MMBTConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration.\n transformer (`nn.Module`): A text transformer that is used by MMBT.\n It should have embeddings, encoder, and pooler attributes.\n encoder (`nn.Module`): Encoder for the second modality.\n It should take in a batch of modal inputs and return k, n dimension embeddings.\n" MMBT_INPUTS_DOCSTRING = "\n Args:\n input_modal (`torch.FloatTensor` of shape `(batch_size, ***)`):\n The other modality data. It will be the shape that the encoder for that type expects. e.g. With an Image\n Encoder, the shape would be (batch_size, channels, height, width)\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. It does not expect [CLS] token to be added as it's\n appended to the end of other modality embeddings. Indices can be obtained using [`AutoTokenizer`]. 
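# --- Illustrative sketch, not part of the original file ---
# MMBTForClassification (defined further below in this file) is assembled from an existing
# text transformer plus a user-supplied encoder for the second modality, as the surrounding
# docstrings describe. This is a minimal construction sketch assuming a small BERT backbone
# and a toy image encoder that returns `(batch, k, modal_hidden_size)` features;
# `ToyImageEncoder` and all sizes are illustrative, not from the source.
from torch import nn
from transformers import BertConfig, BertModel
from transformers.models.deprecated.mmbt.configuration_mmbt import MMBTConfig
from transformers.models.deprecated.mmbt.modeling_mmbt import MMBTForClassification

class ToyImageEncoder(nn.Module):
    """Pools an image into k=4 region features of size modal_hidden_size."""
    def __init__(self, modal_hidden_size=2048):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d((2, 2))      # 2x2 grid of regions
        self.proj = nn.Linear(3, modal_hidden_size)   # lift per-region RGB means to modal_hidden_size

    def forward(self, pixel_values):                  # (batch, 3, H, W)
        regions = self.pool(pixel_values)             # (batch, 3, 2, 2)
        regions = regions.flatten(2).transpose(1, 2)  # (batch, 4, 3)
        return self.proj(regions)                     # (batch, 4, modal_hidden_size)

text_backbone = BertModel(BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256))
mmbt_config = MMBTConfig(text_backbone.config, num_labels=2, modal_hidden_size=2048)
model = MMBTForClassification(mmbt_config, transformer=text_backbone, encoder=ToyImageEncoder())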
See\n [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n modal_start_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Optional start token to be added to Other Modality Embedding. [CLS] Most commonly used for classification\n tasks.\n modal_end_tokens (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Optional end token to be added to Other Modality Embedding. [SEP] Most commonly used.\n attention_mask (*optional*) `torch.FloatTensor` of shape `(batch_size, sequence_length)`:\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, sequence_length)`:\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n modal_token_type_ids (*optional*) `torch.LongTensor` of shape `(batch_size, modal_sequence_length)`:\n Segment token indices to indicate different portions of the non-text modality. The embeddings from these\n tokens will be summed with the respective token embeddings for the non-text modality.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n modal_position_ids (`torch.LongTensor` of shape `(batch_size, modal_sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings for the non-text modality.\n Selected in the range `[0, config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, embedding_dim)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
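# --- Illustrative note, not part of the original file ---
# MMBTModel.forward (below) prepends the k modal embeddings to the text embeddings and pads
# the user-supplied attention mask with ones for those k extra positions. A small,
# self-contained illustration of that bookkeeping with assumed toy shapes:
import torch

batch, k, text_len, hidden = 2, 4, 6, 8
modal_embeddings = torch.randn(batch, k, hidden)        # stands in for the output of ModalEmbeddings
txt_embeddings = torch.randn(batch, text_len, hidden)   # stands in for transformer.embeddings output
attention_mask = torch.ones(batch, text_len)            # text-only mask passed by the caller

embedding_output = torch.cat([modal_embeddings, txt_embeddings], dim=1)
full_mask = torch.cat([torch.ones(batch, k, dtype=attention_mask.dtype), attention_mask], dim=1)
assert embedding_output.shape[:2] == full_mask.shape    # both now cover k + text_len positions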
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare MMBT Model outputting raw hidden-states without any specific head on top.', MMBT_START_DOCSTRING) class MMBTModel(nn.Module, ModuleUtilsMixin): def __init__(self, config, transformer, encoder): super().__init__() self.config = config self.transformer = transformer self.modal_encoder = ModalEmbeddings(config, encoder, transformer.embeddings) @add_start_docstrings_to_model_forward(MMBT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward(self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_txt_shape = input_ids.size() elif inputs_embeds is not None: input_txt_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device modal_embeddings = self.modal_encoder(input_modal, start_token=modal_start_tokens, end_token=modal_end_tokens, position_ids=modal_position_ids, token_type_ids=modal_token_type_ids) input_modal_shape = modal_embeddings.size()[:-1] if token_type_ids is None: token_type_ids = torch.ones(input_txt_shape, dtype=torch.long, device=device) txt_embeddings = self.transformer.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) embedding_output = torch.cat([modal_embeddings, txt_embeddings], 1) input_shape = embedding_output.size()[:-1] if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) else: attention_mask = torch.cat([torch.ones(input_modal_shape, device=device, dtype=torch.long), attention_mask], dim=1) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(input_shape, device=device) else: encoder_attention_mask = torch.cat([torch.ones(input_modal_shape, device=device), encoder_attention_mask], dim=1) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.transformer.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.transformer.pooler(sequence_output) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @add_start_docstrings('\n MMBT Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n ', MMBT_START_DOCSTRING, MMBT_INPUTS_DOCSTRING) class MMBTForClassification(nn.Module): def __init__(self, config, transformer, encoder): super().__init__() self.num_labels = config.num_labels self.mmbt = MMBTModel(config, transformer, encoder) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) def forward(self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, labels=None, return_dict=None): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mmbt(input_modal=input_modal, input_ids=input_ids, modal_start_tokens=modal_start_tokens, modal_end_tokens=modal_end_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, modal_token_type_ids=modal_token_type_ids, position_ids=position_ids, modal_position_ids=modal_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.num_labels == 1: loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/deprecated/nat/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_nat': ['NatConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_nat'] = ['NatForImageClassification', 'NatModel', 'NatPreTrainedModel', 'NatBackbone'] if TYPE_CHECKING: from .configuration_nat import NatConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nat import NatBackbone, NatForImageClassification, NatModel, NatPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/nat/configuration_nat.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging from ....utils.backbone_utils import BackboneConfigMixin, 
get_aligned_output_features_output_indices logger = logging.get_logger(__name__) class NatConfig(BackboneConfigMixin, PretrainedConfig): model_type = 'nat' attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-05, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs): super().__init__(**kwargs) self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.kernel_size = kernel_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) self.layer_scale_init_value = layer_scale_init_value self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)] (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names) # File: transformers-main/src/transformers/models/deprecated/nat/modeling_nat.py """""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....activations import ACT2FN from ....modeling_outputs import BackboneOutput from ....modeling_utils import PreTrainedModel from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ....utils import ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, replace_return_docstrings, requires_backends from ....utils.backbone_utils import BackboneMixin from .configuration_nat import NatConfig if is_natten_available(): from natten.functional import natten2dav, natten2dqkrpb else: def natten2dqkrpb(*args, **kwargs): raise OptionalDependencyNotAvailable() def natten2dav(*args, **kwargs): raise OptionalDependencyNotAvailable() logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'NatConfig' _CHECKPOINT_FOR_DOC = 'shi-labs/nat-mini-in1k-224' _EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512] _IMAGE_CLASS_CHECKPOINT = 'shi-labs/nat-mini-in1k-224' _IMAGE_CLASS_EXPECTED_OUTPUT = 'tiger cat' @dataclass class NatEncoderOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class NatModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class NatImageClassifierOutput(ModelOutput): loss: Optional[torch.FloatTensor] 
= None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None class NatEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.patch_embeddings = NatPatchEmbeddings(config) self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]: embeddings = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) embeddings = self.dropout(embeddings) return embeddings class NatPatchEmbeddings(nn.Module): def __init__(self, config): super().__init__() patch_size = config.patch_size (num_channels, hidden_size) = (config.num_channels, config.embed_dim) self.num_channels = num_channels if patch_size == 4: pass else: raise ValueError('Dinat only supports patch size of 4 at the moment.') self.projection = nn.Sequential(nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor: (_, num_channels, height, width) = pixel_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') embeddings = self.projection(pixel_values) embeddings = embeddings.permute(0, 2, 3, 1) return embeddings class NatDownsampler(nn.Module): def __init__(self, dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None: super().__init__() self.dim = dim self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) self.norm = norm_layer(2 * dim) def forward(self, input_feature: torch.Tensor) -> torch.Tensor: input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) input_feature = self.norm(input_feature) return input_feature def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor: if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() output = input.div(keep_prob) * random_tensor return output class NatDropPath(nn.Module): def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class NeighborhoodAttention(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): super().__init__() if dim % num_heads != 0: raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})') self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.kernel_size = kernel_size self.rpb = nn.Parameter(torch.zeros(num_heads, 2 * self.kernel_size - 1, 2 * self.kernel_size - 1)) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) 
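# --- Illustrative note, not part of the original file ---
# `self.rpb` above is the relative position bias table consumed by natten2dqkrpb: within a
# kernel_size x kernel_size neighborhood, the row and column offsets between a query pixel
# and a key pixel each take one of 2*kernel_size - 1 values, hence the
# (num_heads, 2*kernel_size - 1, 2*kernel_size - 1) shape. A minimal sketch of how an offset
# in [-(kernel_size - 1), kernel_size - 1] indexes into such a table (names illustrative):
import torch

kernel_size, num_heads = 7, 2
rpb = torch.zeros(num_heads, 2 * kernel_size - 1, 2 * kernel_size - 1)
dy, dx = -3, 5                                              # key offset relative to the query pixel
bias = rpb[:, dy + kernel_size - 1, dx + kernel_size - 1]   # one additive bias per head
assert bias.shape == (num_heads,)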
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 3, 1, 2, 4) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = query_layer / math.sqrt(self.attention_head_size) attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, 1) attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, 1) context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class NeighborhoodAttentionOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class NeighborhoodAttentionModule(nn.Module): def __init__(self, config, dim, num_heads, kernel_size): super().__init__() self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size) self.output = NeighborhoodAttentionOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class NatIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class NatOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = 
nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class NatLayer(nn.Module): def __init__(self, config, dim, num_heads, drop_path_rate=0.0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.kernel_size = config.kernel_size self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size) self.drop_path = NatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = NatIntermediate(config, dim) self.output = NatOutput(config, dim) self.layer_scale_parameters = nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True) if config.layer_scale_init_value > 0 else None def maybe_pad(self, hidden_states, height, width): window_size = self.kernel_size pad_values = (0, 0, 0, 0, 0, 0) if height < window_size or width < window_size: pad_l = pad_t = 0 pad_r = max(0, window_size - width) pad_b = max(0, window_size - height) pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b) hidden_states = nn.functional.pad(hidden_states, pad_values) return (hidden_states, pad_values) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, torch.Tensor]: (batch_size, height, width, channels) = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) (hidden_states, pad_values) = self.maybe_pad(hidden_states, height, width) (_, height_pad, width_pad, _) = hidden_states.shape attention_outputs = self.attention(hidden_states, output_attentions=output_attentions) attention_output = attention_outputs[0] was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_output = attention_output[:, :height, :width, :].contiguous() if self.layer_scale_parameters is not None: attention_output = self.layer_scale_parameters[0] * attention_output hidden_states = shortcut + self.drop_path(attention_output) layer_output = self.layernorm_after(hidden_states) layer_output = self.output(self.intermediate(layer_output)) if self.layer_scale_parameters is not None: layer_output = self.layer_scale_parameters[1] * layer_output layer_output = hidden_states + self.drop_path(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs class NatStage(nn.Module): def __init__(self, config, dim, depth, num_heads, drop_path_rate, downsample): super().__init__() self.config = config self.dim = dim self.layers = nn.ModuleList([NatLayer(config=config, dim=dim, num_heads=num_heads, drop_path_rate=drop_path_rate[i]) for i in range(depth)]) if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: (_, height, width, _) = hidden_states.size() for (i, layer_module) in enumerate(self.layers): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: hidden_states = self.downsample(hidden_states_before_downsampling) stage_outputs = (hidden_states, 
hidden_states_before_downsampling) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs class NatEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_levels = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] self.levels = nn.ModuleList([NatStage(config=config, dim=int(config.embed_dim * 2 ** i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path_rate=dpr[sum(config.depths[:i_layer]):sum(config.depths[:i_layer + 1])], downsample=NatDownsampler if i_layer < self.num_levels - 1 else None) for i_layer in range(self.num_levels)]) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple, NatEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for (i, layer_module) in enumerate(self.levels): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] if output_hidden_states and output_hidden_states_before_downsampling: reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and (not output_hidden_states_before_downsampling): reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[2:] if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return NatEncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states) class NatPreTrainedModel(PreTrainedModel): config_class = NatConfig base_model_prefix = 'nat' main_input_name = 'pixel_values' def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) NAT_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`NatConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' NAT_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
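# --- Illustrative usage sketch, not part of the original file ---
# A minimal sketch of running the image-classification head defined below; it assumes the
# optional `natten` backend is installed and reuses the checkpoint named in
# _IMAGE_CLASS_CHECKPOINT above ('shi-labs/nat-mini-in1k-224'). The random array simply
# stands in for a real picture.
import numpy as np
import torch
from transformers import AutoImageProcessor
from transformers.models.deprecated.nat.modeling_nat import NatForImageClassification

processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
model = NatForImageClassification.from_pretrained("shi-labs/nat-mini-in1k-224")

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)   # HWC uint8 stand-in image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])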
See [`ViTImageProcessor.__call__`]\n for details.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare Nat Model transformer outputting raw hidden-states without any specific head on top.', NAT_START_DOCSTRING) class NatModel(NatPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) requires_backends(self, ['natten']) self.config = config self.num_levels = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1)) self.embeddings = NatEmbeddings(config) self.encoder = NatEncoder(config) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=NatModelOutput, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, NatModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return NatModelOutput(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states) @add_start_docstrings('\n Nat Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n the [CLS] token) e.g. 
for ImageNet.\n ', NAT_START_DOCSTRING) class NatForImageClassification(NatPreTrainedModel): def __init__(self, config): super().__init__(config) requires_backends(self, ['natten']) self.num_labels = config.num_labels self.nat = NatModel(config) self.classifier = nn.Linear(self.nat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=NatImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, NatImageClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nat(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return NatImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states) @add_start_docstrings('NAT backbone, to be used with frameworks like DETR and MaskFormer.', NAT_START_DOCSTRING) class NatBackbone(NatPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) requires_backends(self, ['natten']) self.embeddings = NatEmbeddings(config) self.encoder = NatEncoder(config) self.num_features = [config.embed_dim] + [int(config.embed_dim * 2 ** i) for i in range(len(config.depths))] hidden_states_norms = {} for (stage, num_channels) in zip(self.out_features, self.channels): hidden_states_norms[stage] = nn.LayerNorm(num_channels) self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else 
self.config.output_hidden_states output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions embedding_output = self.embeddings(pixel_values) outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=True, output_hidden_states_before_downsampling=True, return_dict=True) hidden_states = outputs.reshaped_hidden_states feature_maps = () for (stage, hidden_state) in zip(self.stage_names, hidden_states): if stage in self.out_features: (batch_size, num_channels, height, width) = hidden_state.shape hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous() hidden_state = hidden_state.view(batch_size, height * width, num_channels) hidden_state = self.hidden_states_norms[stage](hidden_state) hidden_state = hidden_state.view(batch_size, height, width, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_maps += (hidden_state,) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/deprecated/nezha/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = {'configuration_nezha': ['NezhaConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_nezha'] = ['NezhaForNextSentencePrediction', 'NezhaForMaskedLM', 'NezhaForPreTraining', 'NezhaForMultipleChoice', 'NezhaForQuestionAnswering', 'NezhaForSequenceClassification', 'NezhaForTokenClassification', 'NezhaModel', 'NezhaPreTrainedModel'] if TYPE_CHECKING: from .configuration_nezha import NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/nezha/configuration_nezha.py from .... 
import PretrainedConfig class NezhaConfig(PretrainedConfig): model_type = 'nezha' def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.max_relative_position = max_relative_position self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.classifier_dropout = classifier_dropout self.use_cache = use_cache # File: transformers-main/src/transformers/models/deprecated/nezha/modeling_nezha.py """""" import math import os import warnings from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....activations import ACT2FN from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ....modeling_utils import PreTrainedModel from ....pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_nezha import NezhaConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'sijunhe/nezha-cn-base' _CONFIG_FOR_DOC = 'NezhaConfig' def load_tf_weights_in_nezha(model, config, tf_checkpoint_path): try: import re import numpy as np import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
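# --- Illustrative note, not part of the original file ---
# load_tf_weights_in_nezha (continued below) walks each TensorFlow variable name split on '/'
# and maps segments such as 'layer_3' onto indexed PyTorch submodules. A small worked example
# of the regex split it relies on (the segment value is hypothetical):
import re

m_name = "layer_3"
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
    scope_names = re.split(r"_(\d+)", m_name)   # -> ['layer', '3', '']
else:
    scope_names = [m_name]
assert scope_names[0] == "layer" and int(scope_names[1]) == 3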
Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}') array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for (name, array) in zip(names, arrays): name = name.split('/') if any((n in ['adam_v', 'adam_m', 'AdamWeightDecayOptimizer', 'AdamWeightDecayOptimizer_1', 'global_step'] for n in name)): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch('[A-Za-z]+_\\d+', m_name): scope_names = re.split('_(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'kernel' or scope_names[0] == 'gamma': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': pointer = getattr(pointer, 'bias') elif scope_names[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched') except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f'Initialize PyTorch weight {name}') pointer.data = torch.from_numpy(array) return model class NezhaRelativePositionsEncoding(nn.Module): def __init__(self, length, depth, max_relative_position=127): super().__init__() vocab_size = max_relative_position * 2 + 1 range_vec = torch.arange(length) range_mat = range_vec.repeat(length).view(length, length) distance_mat = range_mat - torch.t(range_mat) distance_mat_clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position) final_mat = distance_mat_clipped + max_relative_position embeddings_table = torch.zeros(vocab_size, depth) position = torch.arange(0, vocab_size, dtype=torch.int64).float().unsqueeze(1) div_term = torch.exp(torch.arange(0, depth, 2).float() * (-math.log(10000.0) / depth)) embeddings_table[:, 0::2] = torch.sin(position * div_term) embeddings_table[:, 1::2] = torch.cos(position * div_term) flat_relative_positions_matrix = final_mat.view(-1) one_hot_relative_positions_matrix = torch.nn.functional.one_hot(flat_relative_positions_matrix, num_classes=vocab_size).float() positions_encoding = torch.matmul(one_hot_relative_positions_matrix, embeddings_table) my_shape = list(final_mat.size()) my_shape.append(depth) positions_encoding = positions_encoding.view(my_shape) self.register_buffer('positions_encoding', positions_encoding, persistent=False) def forward(self, length): return self.positions_encoding[:length, :length, :] class NezhaEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) 
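# --- Illustrative sketch, not part of the original file ---
# NezhaRelativePositionsEncoding above precomputes a sinusoidal embedding of the clipped
# relative distance between every (query, key) position pair. A minimal re-derivation for a
# tiny case (length=4, depth=2, max_relative_position=2); the class builds the same lookup
# with a one-hot matmul, which is equivalent to the direct indexing used here.
import math
import torch

length, depth, max_relative_position = 4, 2, 2
range_vec = torch.arange(length)
distance_mat = range_vec.view(1, length) - range_vec.view(length, 1)   # entry [i, j] = j - i
final_mat = torch.clamp(distance_mat, -max_relative_position, max_relative_position) + max_relative_position

vocab_size = 2 * max_relative_position + 1        # number of distinct clipped distances
position = torch.arange(vocab_size, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, depth, 2).float() * (-math.log(10000.0) / depth))
embeddings_table = torch.zeros(vocab_size, depth)
embeddings_table[:, 0::2] = torch.sin(position * div_term)
embeddings_table[:, 1::2] = torch.cos(position * div_term)

positions_encoding = embeddings_table[final_mat]  # (length, length, depth)
assert positions_encoding.shape == (length, length, depth)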
self.register_buffer('token_type_ids', torch.zeros((1, config.max_position_embeddings), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=inputs_embeds.device) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class NezhaSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.relative_positions_encoding = NezhaRelativePositionsEncoding(length=config.max_position_embeddings, depth=self.attention_head_size, max_relative_position=config.max_relative_position) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = 
self.transpose_for_scores(mixed_query_layer) if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) (batch_size, num_attention_heads, from_seq_length, to_seq_length) = attention_scores.size() relations_keys = self.relative_positions_encoding(to_seq_length) query_layer_t = query_layer.permute(2, 0, 1, 3) query_layer_r = query_layer_t.contiguous().view(from_seq_length, batch_size * num_attention_heads, self.attention_head_size) key_position_scores = torch.matmul(query_layer_r, relations_keys.permute(0, 2, 1)) key_position_scores_r = key_position_scores.view(from_seq_length, batch_size, num_attention_heads, from_seq_length) key_position_scores_r_t = key_position_scores_r.permute(1, 2, 0, 3) attention_scores = attention_scores + key_position_scores_r_t attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) relations_values = self.relative_positions_encoding(to_seq_length) attention_probs_t = attention_probs.permute(2, 0, 1, 3) attentions_probs_r = attention_probs_t.contiguous().view(from_seq_length, batch_size * num_attention_heads, to_seq_length) value_position_scores = torch.matmul(attentions_probs_r, relations_values) value_position_scores_r = value_position_scores.view(from_seq_length, batch_size, num_attention_heads, self.attention_head_size) value_position_scores_r_t = value_position_scores_r.permute(1, 2, 0, 3) context_layer = context_layer + value_position_scores_r_t context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class NezhaSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class NezhaAttention(nn.Module): def __init__(self, config): super().__init__() self.self = NezhaSelfAttention(config) self.output = NezhaSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = 
self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class NezhaIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class NezhaOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class NezhaLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = NezhaAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = NezhaAttention(config) self.intermediate = NezhaIntermediate(config) self.output = NezhaOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, 
attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class NezhaEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([NezhaLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class NezhaPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class NezhaPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class NezhaLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = NezhaPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class NezhaOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = NezhaLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class NezhaOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return 
seq_relationship_score class NezhaPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = NezhaLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return (prediction_scores, seq_relationship_score) class NezhaPreTrainedModel(PreTrainedModel): config_class = NezhaConfig load_tf_weights = load_tf_weights_in_nezha base_model_prefix = 'nezha' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class NezhaForPreTrainingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None seq_relationship_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None NEZHA_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`NezhaConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' NEZHA_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare Nezha Model transformer outputting raw hidden-states without any specific head on top.', NEZHA_START_DOCSTRING) class NezhaModel(NezhaPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = NezhaEmbeddings(config) self.encoder = NezhaEncoder(config) self.pooler = NezhaPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = 
past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('\n Nezha Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n ', NEZHA_START_DOCSTRING) class NezhaForPreTraining(NezhaPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder'] def __init__(self, config): super().__init__(config) self.nezha = NezhaModel(config) self.cls = NezhaPreTrainingHeads(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=NezhaForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, next_sentence_label: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], NezhaForPreTrainingOutput]: return_dict = return_dict 
if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) (sequence_output, pooled_output) = outputs[:2] (prediction_scores, seq_relationship_score) = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return NezhaForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('Nezha Model with a `language modeling` head on top.', NEZHA_START_DOCSTRING) class NezhaForMaskedLM(NezhaPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder'] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning('If you want to use `NezhaForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.') self.nezha = NezhaModel(config, add_pooling_layer=False) self.cls = NezhaOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, 
attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] if self.config.pad_token_id is None: raise ValueError('The PAD token should be defined for generation') attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full((effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {'input_ids': input_ids, 'attention_mask': attention_mask} @add_start_docstrings('Nezha Model with a `next sentence prediction (classification)` head on top.', NEZHA_START_DOCSTRING) class NezhaForNextSentencePrediction(NezhaPreTrainedModel): def __init__(self, config): super().__init__(config) self.nezha = NezhaModel(config) self.cls = NezhaOnlyNSPHead(config) self.post_init() @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]: if 'next_sentence_label' in kwargs: warnings.warn('The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.', FutureWarning) labels = kwargs.pop('next_sentence_label') return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return (next_sentence_loss,) + output if next_sentence_loss is not None else output return NextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Nezha Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. 
for GLUE tasks.\n ', NEZHA_START_DOCSTRING) class NezhaForSequenceClassification(NezhaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.nezha = NezhaModel(config) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Nezha Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', NEZHA_START_DOCSTRING) class NezhaForMultipleChoice(NezhaPreTrainedModel): def __init__(self, config): super().__init__(config) self.nezha = NezhaModel(config) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Nezha Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', NEZHA_START_DOCSTRING) class NezhaForTokenClassification(NezhaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.nezha = NezhaModel(config, add_pooling_layer=False) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Nezha Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', NEZHA_START_DOCSTRING) class NezhaForQuestionAnswering(NezhaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.nezha = NezhaModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nezha(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, 
output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/deprecated/open_llama/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_open_llama': ['OpenLlamaConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_open_llama'] = ['LlamaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_open_llama_fast'] = ['LlamaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_open_llama'] = ['OpenLlamaForCausalLM', 'OpenLlamaModel', 'OpenLlamaPreTrainedModel', 'OpenLlamaForSequenceClassification'] if TYPE_CHECKING: from .configuration_open_llama import OpenLlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from transformers import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from transformers import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_open_llama import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel, OpenLlamaPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/open_llama/configuration_open_llama.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class OpenLlamaConfig(PretrainedConfig): model_type = 'open-llama' def __init__(self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, 
pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_theta=10000.0, rope_scaling=None, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.use_memory_efficient_attention = kwargs.pop('use_memorry_efficient_attention', use_memory_efficient_attention) self.hidden_dropout_prob = hidden_dropout_prob self.attention_dropout_prob = attention_dropout_prob self.use_stable_embedding = use_stable_embedding self.shared_input_output_embedding = shared_input_output_embedding self.rope_theta = rope_theta self.rope_scaling = rope_scaling self._rope_scaling_validation() super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) def _rope_scaling_validation(self): if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: raise ValueError(f'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}') rope_scaling_type = self.rope_scaling.get('type', None) rope_scaling_factor = self.rope_scaling.get('factor', None) if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']: raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}") if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") # File: transformers-main/src/transformers/models/deprecated/open_llama/modeling_open_llama.py """""" import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....activations import ACT2FN from ....modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from ....modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast from ....modeling_utils import PreTrainedModel from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_open_llama import OpenLlamaConfig logger = logging.get_logger(__name__) try: from xformers import ops as xops except ImportError: xops = None _CONFIG_FOR_DOC = 'OpenLlamaConfig' class OpenLlamaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}' class OpenLlamaRotaryEmbedding(nn.Module): def __init__(self, dim, 
max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim) self.register_buffer('inv_freq', inv_freq, persistent=False) self._set_cos_sin_cache(seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False) self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return (self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype)) class OpenLlamaLinearScalingRotaryEmbedding(OpenLlamaRotaryEmbedding): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) t = t / self.scaling_factor freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False) self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False) class OpenLlamaDynamicNTKScalingRotaryEmbedding(OpenLlamaRotaryEmbedding): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len if seq_len > self.max_position_embeddings: base = self.base * (self.scaling_factor * seq_len / self.max_position_embeddings - (self.scaling_factor - 1)) ** (self.dim / (self.dim - 2)) inv_freq = 1.0 / base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim) self.register_buffer('inv_freq', inv_freq, persistent=False) t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False) self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False) def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = q * cos + rotate_half(q) * sin k_embed = k * cos + rotate_half(k) * sin return (q_embed, k_embed) class OpenLlamaMLP(nn.Module): def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str, dropout_prob: float): super().__init__() self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) 
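# A minimal, hedged usage sketch of the rotary helpers defined above
# (`OpenLlamaRotaryEmbedding`, `rotate_half`, `apply_rotary_pos_emb`). The toy
# shapes below (head_dim=8, seq_len=4, bsz=1, n_heads=2) are assumptions chosen
# only for illustration, not library defaults.
#
#   import torch
#   head_dim, seq_len, bsz, n_heads = 8, 4, 1, 2
#   rope = OpenLlamaRotaryEmbedding(head_dim, max_position_embeddings=16)
#   q = torch.randn(bsz, n_heads, seq_len, head_dim)
#   k = torch.randn(bsz, n_heads, seq_len, head_dim)
#   cos, sin = rope(q, seq_len=seq_len)                # cached (seq_len, head_dim) tables
#   position_ids = torch.arange(seq_len).unsqueeze(0)  # (bsz, seq_len)
#   q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
#   # Each (x_i, x_{i + head_dim/2}) pair is rotated by a position-dependent angle,
#   # so per-token vector norms are preserved while q.k products become a function
#   # of relative position.
#   assert torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-5)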
self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.act_fn = ACT2FN[hidden_act] self.dropout = nn.Dropout(dropout_prob) def forward(self, x): out = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return self.dropout(out) class OpenLlamaAttention(nn.Module): def __init__(self, config: OpenLlamaConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.max_position_embeddings = config.max_position_embeddings self.dropout_prob = config.attention_dropout_prob self.rope_theta = config.rope_theta if self.head_dim * self.num_heads != self.hidden_size: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self._init_rope() def _init_rope(self): if self.config.rope_scaling is None: self.rotary_emb = OpenLlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta) else: scaling_type = self.config.rope_scaling['type'] scaling_factor = self.config.rope_scaling['factor'] if scaling_type == 'linear': self.rotary_emb = OpenLlamaLinearScalingRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta) elif scaling_type == 'dynamic': self.rotary_emb = OpenLlamaDynamicNTKScalingRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta) else: raise ValueError(f'Unknown RoPE scaling type {scaling_type}') def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: bool=False, use_cache: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] (cos, sin) = self.rotary_emb(value_states, seq_len=kv_seq_len) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) if past_key_value is not None: key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None if self.config.use_memory_efficient_attention and xops is not None and self.training: attn_weights = None query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = 
value_states.transpose(1, 2) attn_output = xops.memory_efficient_attention(query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask(), p=self.dropout_prob) else: attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}') attn_weights = attn_weights + attention_mask attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device)) attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class OpenLlamaDecoderLayer(nn.Module): def __init__(self, config: OpenLlamaConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = OpenLlamaAttention(config=config) self.mlp = OpenLlamaMLP(hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, dropout_prob=config.hidden_dropout_prob) self.input_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs OPEN_LLAMA_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`OpenLlamaConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('The bare Open-Llama Model outputting raw hidden-states without any specific head on top.', OPEN_LLAMA_START_DOCSTRING) class OpenLlamaPreTrainedModel(PreTrainedModel): config_class = OpenLlamaConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['OpenLlamaDecoderLayer'] def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): if self.config.use_stable_embedding: torch.nn.init.xavier_normal_(module.weight.data) else: module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() OPEN_LLAMA_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare Open-Llama Model outputting raw hidden-states without any specific head on top.', OPEN_LLAMA_START_DOCSTRING) class OpenLlamaModel(OpenLlamaPreTrainedModel): def __init__(self, config: OpenLlamaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) if config.use_stable_embedding: self.embed_layer_norm = nn.LayerNorm(config.hidden_size) else: self.embed_layer_norm = None self.layers = nn.ModuleList([OpenLlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.norm = OpenLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: (batch_size, seq_length) = input_ids.shape elif inputs_embeds is not None: (batch_size, seq_length, _) = inputs_embeds.shape else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') seq_length_with_past = seq_length past_key_values_length = 0 if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange(past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if self.embed_layer_norm: inputs_embeds = self.embed_layer_norm(inputs_embeds) if self.config.use_memory_efficient_attention and self.training: attention_mask = None elif attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device) input_shape = (batch_size, seq_length) attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) hidden_states = inputs_embeds all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, position_ids, None, output_attentions, None) else: layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns) class OpenLlamaForCausalLM(OpenLlamaPreTrainedModel): def __init__(self, config): super().__init__(config) self.model = OpenLlamaModel(config) if config.shared_input_output_embedding: self.lm_head = None else: self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, 
output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.shared_input_output_embedding: logits = torch.einsum('blh,vh->blv', hidden_states.to(self.model.embed_tokens.weight.device), self.model.embed_tokens.weight) else: logits = self.lm_head(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs): if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] if inputs_embeds is not None and past_key_values is None: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids} model_inputs.update({'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'attention_mask': attention_mask}) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past @add_start_docstrings('\n The LLaMa Model transformer with a sequence classification head on top (linear layer).\n\n [`OpenLlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal\n models (e.g. GPT-2) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', OPEN_LLAMA_START_DOCSTRING) class OpenLlamaForSequenceClassification(OpenLlamaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = OpenLlamaModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(OPEN_LLAMA_INPUTS_DOCSTRING) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model(input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) # File: 
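# Illustrative sketch (toy tensors; pad_token_id assumed to be 0) of the last-non-padding-token
# pooling used by OpenLlamaForSequenceClassification above: the index of the first pad token
# minus one is the last real token, and rows without padding wrap to the final position.
import torch
pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, pad_token_id], [8, 9, pad_token_id, pad_token_id]])
logits = torch.randn(2, 4, 3)                                   # (batch, seq, num_labels)
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
pooled_logits = logits[torch.arange(input_ids.shape[0]), sequence_lengths]
assert pooled_logits.shape == (2, 3)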
transformers-main/src/transformers/models/deprecated/qdqbert/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_qdqbert': ['QDQBertConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_qdqbert'] = ['QDQBertForMaskedLM', 'QDQBertForMultipleChoice', 'QDQBertForNextSentencePrediction', 'QDQBertForQuestionAnswering', 'QDQBertForSequenceClassification', 'QDQBertForTokenClassification', 'QDQBertLayer', 'QDQBertLMHeadModel', 'QDQBertModel', 'QDQBertPreTrainedModel', 'load_tf_weights_in_qdqbert'] if TYPE_CHECKING: from .configuration_qdqbert import QDQBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_qdqbert import QDQBertForMaskedLM, QDQBertForMultipleChoice, QDQBertForNextSentencePrediction, QDQBertForQuestionAnswering, QDQBertForSequenceClassification, QDQBertForTokenClassification, QDQBertLayer, QDQBertLMHeadModel, QDQBertModel, QDQBertPreTrainedModel, load_tf_weights_in_qdqbert else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/qdqbert/configuration_qdqbert.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class QDQBertConfig(PretrainedConfig): model_type = 'qdqbert' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache # File: transformers-main/src/transformers/models/deprecated/qdqbert/modeling_qdqbert.py """""" import math import os import warnings from typing import Dict, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....activations import ACT2FN from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ....modeling_utils import PreTrainedModel from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, 
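# Hedged usage sketch for the configuration above. The import path assumes a transformers
# version that still ships this deprecated module layout; unspecified hyperparameters fall
# back to the defaults in the __init__ signature.
from transformers.models.deprecated.qdqbert import QDQBertConfig

config = QDQBertConfig(hidden_size=512, num_hidden_layers=8, num_attention_heads=8)
print(config.intermediate_size)  # 3072, the default from the signature above
print(config.model_type)         # 'qdqbert'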
is_pytorch_quantization_available, logging, replace_return_docstrings, requires_backends from .configuration_qdqbert import QDQBertConfig logger = logging.get_logger(__name__) if is_pytorch_quantization_available(): try: from pytorch_quantization import nn as quant_nn from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer except OSError: logger.error("QDQBERT models are not usable since `pytorch_quantization` can't be loaded. Please try to reinstall it following the instructions here: https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization.") _CHECKPOINT_FOR_DOC = 'google-bert/bert-base-uncased' _CONFIG_FOR_DOC = 'QDQBertConfig' def load_tf_weights_in_qdqbert(model, tf_checkpoint_path): try: import re import numpy as np import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}') array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for (name, array) in zip(names, arrays): name = name.split('/') if any((n in ['adam_v', 'adam_m', 'AdamWeightDecayOptimizer', 'AdamWeightDecayOptimizer_1', 'global_step'] for n in name)): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch('[A-Za-z]+_\\d+', m_name): scope_names = re.split('_(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'kernel' or scope_names[0] == 'gamma': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': pointer = getattr(pointer, 'bias') elif scope_names[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched') except ValueError as e: e.args += (pointer.shape, array.shape) raise logger.info(f'Initialize PyTorch weight {name}') pointer.data = torch.from_numpy(array) return model class QDQBertEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, 
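# Hedged sketch (toy sizes, assumed dropout) of what this embedding forward computes in the
# default 'absolute' position-embedding case: word + token-type + position embeddings,
# followed by LayerNorm and dropout.
import torch
from torch import nn

vocab_size, hidden_size, max_positions, type_vocab_size = 100, 16, 32, 2
word_embeddings = nn.Embedding(vocab_size, hidden_size)
position_embeddings = nn.Embedding(max_positions, hidden_size)
token_type_embeddings = nn.Embedding(type_vocab_size, hidden_size)
layer_norm, dropout = nn.LayerNorm(hidden_size), nn.Dropout(0.1)

input_ids = torch.randint(0, vocab_size, (1, 8))
position_ids = torch.arange(8).unsqueeze(0)
token_type_ids = torch.zeros_like(input_ids)
embeddings = word_embeddings(input_ids) + token_type_embeddings(token_type_ids) + position_embeddings(position_ids)
embeddings = dropout(layer_norm(embeddings))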
input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class QDQBertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = quant_nn.QuantLinear(config.hidden_size, self.all_head_size) self.key = quant_nn.QuantLinear(config.hidden_size, self.all_head_size) self.value = quant_nn.QuantLinear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder self.matmul_q_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) self.matmul_k_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) self.matmul_v_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) self.matmul_a_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = 
self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(self.matmul_q_input_quantizer(query_layer), self.matmul_k_input_quantizer(key_layer.transpose(-1, -2))) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(self.matmul_a_input_quantizer(attention_probs), self.matmul_v_input_quantizer(value_layer)) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class QDQBertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = quant_nn.QuantLinear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.add_local_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) self.add_residual_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) add_local = self.add_local_input_quantizer(hidden_states) add_residual = self.add_residual_input_quantizer(input_tensor) hidden_states = 
self.LayerNorm(add_local + add_residual) return hidden_states class QDQBertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = QDQBertSelfAttention(config) self.output = QDQBertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False): self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class QDQBertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = quant_nn.QuantLinear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class QDQBertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = quant_nn.QuantLinear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.add_local_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) self.add_residual_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) add_local = self.add_local_input_quantizer(hidden_states) add_residual = self.add_residual_input_quantizer(input_tensor) hidden_states = self.LayerNorm(add_local + add_residual) return hidden_states class QDQBertLayer(nn.Module): def __init__(self, config): super().__init__() self.seq_len_dim = 1 self.attention = QDQBertAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = QDQBertAttention(config) self.intermediate = QDQBertIntermediate(config) self.output = QDQBertOutput(config) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False): self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, 
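# The attention above wraps every matmul input in a TensorQuantizer so that INT8 scales can be
# calibrated and exported as Q/DQ nodes. Below is a rough, dependency-free emulation of that
# fake-quantization step (symmetric per-tensor int8), for illustration only; it is not the
# library's TensorQuantizer.
import torch

def fake_quant_int8(x: torch.Tensor) -> torch.Tensor:
    # quantize to the int8 range and immediately dequantize (the "QDQ" pattern)
    amax = x.abs().max().clamp(min=1e-8)
    scale = amax / 127.0
    return torch.clamp((x / scale).round(), -127, 127) * scale

query_layer = torch.randn(1, 12, 8, 64)   # (batch, heads, seq, head_dim)
key_layer = torch.randn(1, 12, 8, 64)
attention_scores = torch.matmul(fake_quant_int8(query_layer), fake_quant_int8(key_layer.transpose(-1, -2)))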
past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = self.feed_forward_chunk(attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class QDQBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([QDQBertLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class QDQBertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class QDQBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class QDQBertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = QDQBertPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class QDQBertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = QDQBertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class QDQBertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class QDQBertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = QDQBertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return 
(prediction_scores, seq_relationship_score) class QDQBertPreTrainedModel(PreTrainedModel): config_class = QDQBertConfig load_tf_weights = load_tf_weights_in_qdqbert base_model_prefix = 'bert' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) QDQBERT_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`QDQBertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' QDQBERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare QDQBERT Model transformer outputting raw hidden-states without any specific head on top.', QDQBERT_START_DOCSTRING) class QDQBertModel(QDQBertPreTrainedModel): def __init__(self, config, add_pooling_layer: bool=True): requires_backends(self, 'pytorch_quantization') super().__init__(config) self.config = config self.embeddings = QDQBertEmbeddings(config) self.encoder = QDQBertEncoder(config) self.pooler = QDQBertPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune: Dict[int, List[int]]): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() (batch_size, seq_length) = input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] (batch_size, seq_length) = input_shape else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = 
buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('QDQBERT Model with a `language modeling` head on top for CLM fine-tuning.', QDQBERT_START_DOCSTRING) class QDQBertLMHeadModel(QDQBertPreTrainedModel): _tied_weights_keys = ['predictions.decoder.weight', 'predictions.decoder.bias'] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning('If you want to use `QDQBertLMHeadModel` as a standalone, add `is_decoder=True.`') self.bert = QDQBertModel(config, add_pooling_layer=False) self.cls = QDQBertOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.LongTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: 
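# Illustrative sketch (toy vocabulary) of the causal-LM loss computed just below: the score at
# position i is matched against the label at position i + 1, i.e. predictions are shifted left
# by one relative to the labels.
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 11
prediction_scores = torch.randn(2, 6, vocab_size)   # (batch, seq, vocab)
labels = torch.randint(0, vocab_size, (2, 6))

shifted_scores = prediction_scores[:, :-1, :].contiguous()
shifted_labels = labels[:, 1:].contiguous()
loss = CrossEntropyLoss()(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))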
return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids: Optional[torch.LongTensor], past_key_values=None, attention_mask: Optional[torch.Tensor]=None, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past @add_start_docstrings('QDQBERT Model with a `language modeling` head on top.', QDQBERT_START_DOCSTRING) class QDQBertForMaskedLM(QDQBertPreTrainedModel): _tied_weights_keys = ['predictions.decoder.weight', 'predictions.decoder.bias'] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning('If you want to use `QDQBertForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.') self.bert = QDQBertModel(config, add_pooling_layer=False) self.cls = QDQBertOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: 
Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor]=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] if self.config.pad_token_id is None: raise ValueError('The PAD token should be defined for generation') attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full((effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {'input_ids': input_ids, 'attention_mask': attention_mask} @add_start_docstrings('Bert Model with a `next sentence prediction (classification)` head on top.', QDQBERT_START_DOCSTRING) class QDQBertForNextSentencePrediction(QDQBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = QDQBertModel(config) self.cls = QDQBertOnlyNSPHead(config) self.post_init() @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple, NextSentencePredictorOutput]: if 'next_sentence_label' in kwargs: warnings.warn('The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.', FutureWarning) labels = kwargs.pop('next_sentence_label') return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = None if 
labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return (next_sentence_loss,) + output if next_sentence_loss is not None else output return NextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', QDQBERT_START_DOCSTRING) class QDQBertForSequenceClassification(QDQBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = QDQBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', QDQBERT_START_DOCSTRING) class QDQBertForMultipleChoice(QDQBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = QDQBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MultipleChoiceModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n QDQBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', QDQBERT_START_DOCSTRING) class QDQBertForTokenClassification(QDQBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = QDQBertModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n QDQBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', QDQBERT_START_DOCSTRING) class QDQBertForQuestionAnswering(QDQBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = QDQBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, 
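# Sketch (toy sizes) of the span head used below: a single Linear layer emits two values per
# token (num_labels == 2 for extractive QA), which are split into start and end logits.
import torch
from torch import nn

hidden_size = 16
qa_outputs = nn.Linear(hidden_size, 2)
sequence_output = torch.randn(2, 8, hidden_size)    # (batch, seq, hidden)
start_logits, end_logits = qa_outputs(sequence_output).split(1, dim=-1)
start_logits = start_logits.squeeze(-1)             # (batch, seq)
end_logits = end_logits.squeeze(-1)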
position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/deprecated/realm/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = {'configuration_realm': ['RealmConfig'], 'tokenization_realm': ['RealmTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_realm_fast'] = ['RealmTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_realm'] = ['RealmEmbedder', 'RealmForOpenQA', 'RealmKnowledgeAugEncoder', 'RealmPreTrainedModel', 'RealmReader', 'RealmScorer', 'load_tf_weights_in_realm'] _import_structure['retrieval_realm'] = ['RealmRetriever'] if TYPE_CHECKING: from .configuration_realm import RealmConfig from .tokenization_realm import RealmTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_realm import RealmTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_realm import RealmEmbedder, RealmForOpenQA, RealmKnowledgeAugEncoder, RealmPreTrainedModel, RealmReader, RealmScorer, load_tf_weights_in_realm from .retrieval_realm import RealmRetriever else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/realm/configuration_realm.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class RealmConfig(PretrainedConfig): model_type = 'realm' def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, 
span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=0.001, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.retriever_proj_size = retriever_proj_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_candidates = num_candidates self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.span_hidden_size = span_hidden_size self.max_span_width = max_span_width self.reader_layer_norm_eps = reader_layer_norm_eps self.reader_beam_size = reader_beam_size self.reader_seq_len = reader_seq_len self.num_block_records = num_block_records self.searcher_beam_size = searcher_beam_size # File: transformers-main/src/transformers/models/deprecated/realm/modeling_realm.py """""" import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ....activations import ACT2FN from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput from ....modeling_utils import PreTrainedModel from ....pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ....utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_realm import RealmConfig logger = logging.get_logger(__name__) _EMBEDDER_CHECKPOINT_FOR_DOC = 'google/realm-cc-news-pretrained-embedder' _ENCODER_CHECKPOINT_FOR_DOC = 'google/realm-cc-news-pretrained-encoder' _SCORER_CHECKPOINT_FOR_DOC = 'google/realm-cc-news-pretrained-scorer' _CONFIG_FOR_DOC = 'RealmConfig' def load_tf_weights_in_realm(model, config, tf_checkpoint_path): try: import re import numpy as np import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}') array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for (name, array) in zip(names, arrays): if isinstance(model, RealmReader) and 'reader' not in name: logger.info(f"Skipping {name} as it is not {model.__class__.__name__}'s parameter") continue if (name.startswith('bert') or name.startswith('cls')) and isinstance(model, RealmForOpenQA): name = name.replace('bert/', 'reader/realm/') name = name.replace('cls/', 'reader/cls/') if (name.startswith('bert') or name.startswith('cls')) and isinstance(model, RealmKnowledgeAugEncoder): name = name.replace('bert/', 'realm/') if name.startswith('reader'): reader_prefix = '' if isinstance(model, RealmReader) else 'reader/' name = name.replace('reader/module/bert/', f'{reader_prefix}realm/') name = name.replace('reader/module/cls/', f'{reader_prefix}cls/') name = name.replace('reader/dense/', f'{reader_prefix}qa_outputs/dense_intermediate/') name = name.replace('reader/dense_1/', f'{reader_prefix}qa_outputs/dense_output/') name = name.replace('reader/layer_normalization', f'{reader_prefix}qa_outputs/layer_normalization') if name.startswith('module/module/module/'): embedder_prefix = '' if isinstance(model, RealmEmbedder) else 'embedder/' name = name.replace('module/module/module/module/bert/', f'{embedder_prefix}realm/') name = name.replace('module/module/module/LayerNorm/', f'{embedder_prefix}cls/LayerNorm/') name = name.replace('module/module/module/dense/', f'{embedder_prefix}cls/dense/') name = name.replace('module/module/module/module/cls/predictions/', f'{embedder_prefix}cls/predictions/') name = name.replace('module/module/module/bert/', f'{embedder_prefix}realm/') name = name.replace('module/module/module/cls/predictions/', f'{embedder_prefix}cls/predictions/') elif name.startswith('module/module/'): embedder_prefix = '' if isinstance(model, RealmEmbedder) else 'embedder/' name = name.replace('module/module/LayerNorm/', f'{embedder_prefix}cls/LayerNorm/') name = name.replace('module/module/dense/', f'{embedder_prefix}cls/dense/') name = name.split('/') if any((n in ['adam_v', 'adam_m', 'AdamWeightDecayOptimizer', 'AdamWeightDecayOptimizer_1', 'global_step'] for n in name)): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch('[A-Za-z]+_\\d+', m_name): scope_names = re.split('_(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'kernel' or scope_names[0] == 'gamma': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': pointer = getattr(pointer, 'bias') else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape, f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f'Initialize PyTorch weight {name}') pointer.data = torch.from_numpy(array) 
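# At this point every TF checkpoint variable whose renamed path matched a module attribute has been
# shape-checked and copied into the corresponding PyTorch parameter via `pointer.data = torch.from_numpy(array)`;
# optimizer slots (adam_v, adam_m, global_step, ...) and variables belonging to a different REALM head
# were skipped earlier in the loop.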
return model class RealmEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class RealmSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return 
x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + 
(past_key_value,) return outputs class RealmSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states REALM_SELF_ATTENTION_CLASSES = {'eager': RealmSelfAttention} class RealmAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = REALM_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = RealmSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class RealmIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class RealmOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class RealmLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = RealmAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = 
config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = RealmAttention(config, position_embedding_type='absolute') self.intermediate = RealmIntermediate(config) self.output = RealmOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class RealmEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([RealmLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class RealmPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output @dataclass class RealmEmbedderOutput(ModelOutput): projected_score: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class RealmScorerOutput(ModelOutput): relevance_score: torch.FloatTensor = None query_score: torch.FloatTensor = None candidate_score: torch.FloatTensor = None @dataclass class RealmReaderOutput(ModelOutput): loss: torch.FloatTensor = None retriever_loss: torch.FloatTensor = None reader_loss: torch.FloatTensor = None retriever_correct: torch.BoolTensor = None reader_correct: torch.BoolTensor = None block_idx: torch.LongTensor = None candidate: torch.LongTensor = None start_pos: torch.int32 = None end_pos: torch.int32 = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class RealmForOpenQAOutput(ModelOutput): reader_output: dict = None predicted_answer_ids: torch.LongTensor = None class RealmPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class RealmLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() 
self.transform = RealmPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class RealmOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = RealmLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class RealmScorerProjection(nn.Module): def __init__(self, config): super().__init__() self.predictions = RealmLMPredictionHead(config) self.dense = nn.Linear(config.hidden_size, config.retriever_proj_size) self.LayerNorm = nn.LayerNorm(config.retriever_proj_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class RealmReaderProjection(nn.Module): def __init__(self, config): super().__init__() self.config = config self.dense_intermediate = nn.Linear(config.hidden_size, config.span_hidden_size * 2) self.dense_output = nn.Linear(config.span_hidden_size, 1) self.layer_normalization = nn.LayerNorm(config.span_hidden_size, eps=config.reader_layer_norm_eps) self.relu = nn.ReLU() def forward(self, hidden_states, block_mask): def span_candidates(masks): (_, max_sequence_len) = masks.shape def _spans_given_width(width): current_starts = torch.arange(max_sequence_len - width + 1, device=masks.device) current_ends = torch.arange(width - 1, max_sequence_len, device=masks.device) return (current_starts, current_ends) (starts, ends) = zip(*(_spans_given_width(w + 1) for w in range(self.config.max_span_width))) starts = torch.cat(starts, 0) ends = torch.cat(ends, 0) start_masks = torch.index_select(masks, dim=-1, index=starts) end_masks = torch.index_select(masks, dim=-1, index=ends) span_masks = start_masks * end_masks return (starts, ends, span_masks) def mask_to_score(mask, dtype=torch.float32): return (1.0 - mask.type(dtype)) * torch.finfo(dtype).min hidden_states = self.dense_intermediate(hidden_states) (start_projection, end_projection) = hidden_states.chunk(2, dim=-1) (candidate_starts, candidate_ends, candidate_mask) = span_candidates(block_mask) candidate_start_projections = torch.index_select(start_projection, dim=1, index=candidate_starts) candidate_end_projections = torch.index_select(end_projection, dim=1, index=candidate_ends) candidate_hidden = candidate_start_projections + candidate_end_projections candidate_hidden = self.relu(candidate_hidden) candidate_hidden = self.layer_normalization(candidate_hidden) reader_logits = self.dense_output(candidate_hidden).squeeze(-1) reader_logits += mask_to_score(candidate_mask, dtype=reader_logits.dtype) return (reader_logits, candidate_starts, candidate_ends) REALM_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RealmConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' REALM_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" class RealmPreTrainedModel(PreTrainedModel): config_class = RealmConfig load_tf_weights = load_tf_weights_in_realm base_model_prefix = 'realm' def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _flatten_inputs(self, *inputs): flattened_inputs = [] for tensor in inputs: if tensor is None: flattened_inputs.append(None) else: input_shape = tensor.shape if len(input_shape) > 2: tensor = tensor.view((-1, input_shape[-1])) flattened_inputs.append(tensor) return flattened_inputs class RealmBertModel(RealmPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = RealmEmbeddings(config) self.encoder = RealmEncoder(config) self.pooler = RealmPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) 
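# The 2-D `attention_mask` prepared above is expanded by `get_extended_attention_mask` into the
# additive mask consumed by the self-attention layers: 0.0 for positions that may be attended to and a
# large negative value for padding, broadcastable over attention heads and query positions.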
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('The embedder of REALM outputting projected score that will be used to calculate relevance score.', REALM_START_DOCSTRING) class RealmEmbedder(RealmPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder.bias'] def __init__(self, config): super().__init__(config) self.realm = RealmBertModel(self.config) self.cls = RealmScorerProjection(self.config) self.post_init() def get_input_embeddings(self): return self.realm.embeddings.word_embeddings def set_input_embeddings(self, value): self.realm.embeddings.word_embeddings = value @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=RealmEmbedderOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, RealmEmbedderOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict realm_outputs = self.realm(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooler_output = realm_outputs[1] projected_score = self.cls(pooler_output) if not return_dict: return (projected_score,) + realm_outputs[2:4] else: return RealmEmbedderOutput(projected_score=projected_score, hidden_states=realm_outputs.hidden_states, attentions=realm_outputs.attentions) 
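# --- Illustrative sketch (hypothetical, not part of the original modeling_realm.py) ---
# A minimal example of how the RealmEmbedder defined above could be exercised with a tiny,
# randomly initialized RealmConfig. The reduced config sizes and the helper name
# `_example_realm_embedder_forward` are assumptions made purely for illustration; only
# `torch`, `RealmConfig` and `RealmEmbedder` (all imported or defined earlier in this module)
# are taken from the source.
def _example_realm_embedder_forward():
    # Deliberately small config so the sketch runs quickly on CPU.
    config = RealmConfig(
        vocab_size=128,
        hidden_size=32,
        retriever_proj_size=16,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=64,
        max_position_embeddings=64,
    )
    model = RealmEmbedder(config)
    model.eval()
    # One query of 10 token ids: shape (batch_size, sequence_length).
    input_ids = torch.randint(0, config.vocab_size, (1, 10))
    with torch.no_grad():
        outputs = model(input_ids, return_dict=True)
    # The embedder pools the [CLS] hidden state and projects it to `retriever_proj_size`;
    # this projected vector is what the RealmScorer defined below compares against candidate embeddings.
    assert outputs.projected_score.shape == (1, config.retriever_proj_size)
    return outputs.projected_score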
@add_start_docstrings('The scorer of REALM outputting relevance scores representing the score of document candidates (before softmax).', REALM_START_DOCSTRING) class RealmScorer(RealmPreTrainedModel): def __init__(self, config, query_embedder=None): super().__init__(config) self.embedder = RealmEmbedder(self.config) self.query_embedder = query_embedder if query_embedder is not None else self.embedder self.post_init() @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=RealmScorerOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, candidate_input_ids: Optional[torch.LongTensor]=None, candidate_attention_mask: Optional[torch.FloatTensor]=None, candidate_token_type_ids: Optional[torch.LongTensor]=None, candidate_inputs_embeds: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, RealmScorerOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError('You have to specify either input_ids or input_embeds.') if candidate_input_ids is None and candidate_inputs_embeds is None: raise ValueError('You have to specify either candidate_input_ids or candidate_inputs_embeds.') query_outputs = self.query_embedder(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) (flattened_input_ids, flattened_attention_mask, flattened_token_type_ids) = self._flatten_inputs(candidate_input_ids, candidate_attention_mask, candidate_token_type_ids) candidate_outputs = self.embedder(flattened_input_ids, attention_mask=flattened_attention_mask, token_type_ids=flattened_token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=candidate_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) query_score = query_outputs[0] candidate_score = candidate_outputs[0] candidate_score = candidate_score.view(-1, self.config.num_candidates, self.config.retriever_proj_size) relevance_score = torch.einsum('bd,bnd->bn', query_score, candidate_score) if not return_dict: return (relevance_score, query_score, candidate_score) return RealmScorerOutput(relevance_score=relevance_score, query_score=query_score, candidate_score=candidate_score) @add_start_docstrings('The knowledge-augmented encoder of REALM outputting masked language model logits and marginal log-likelihood loss.', REALM_START_DOCSTRING) class RealmKnowledgeAugEncoder(RealmPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder'] def __init__(self, config): super().__init__(config) self.realm = RealmBertModel(self.config) self.cls = RealmOnlyMLMHead(self.config) self.post_init() def get_input_embeddings(self): return self.realm.embeddings.word_embeddings def set_input_embeddings(self, value): self.realm.embeddings.word_embeddings = value def get_output_embeddings(self): return self.cls.predictions.decoder def 
set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('batch_size, num_candidates, sequence_length')) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, relevance_score: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, mlm_mask: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and relevance_score is None: raise ValueError('You have to specify `relevance_score` when `labels` is specified in order to compute loss.') (flattened_input_ids, flattened_attention_mask, flattened_token_type_ids) = self._flatten_inputs(input_ids, attention_mask, token_type_ids) joint_outputs = self.realm(flattened_input_ids, attention_mask=flattened_attention_mask, token_type_ids=flattened_token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) joint_output = joint_outputs[0] prediction_scores = self.cls(joint_output) candidate_score = relevance_score masked_lm_loss = None if labels is not None: (batch_size, seq_length) = labels.size() if mlm_mask is None: mlm_mask = torch.ones_like(labels, dtype=torch.float32) else: mlm_mask = mlm_mask.type(torch.float32) loss_fct = CrossEntropyLoss(reduction='none') mlm_logits = prediction_scores.view(-1, self.config.vocab_size) mlm_targets = labels.tile(1, self.config.num_candidates).view(-1) masked_lm_log_prob = -loss_fct(mlm_logits, mlm_targets).view(batch_size, self.config.num_candidates, seq_length) candidate_log_prob = candidate_score.log_softmax(-1).unsqueeze(-1) joint_gold_log_prob = candidate_log_prob + masked_lm_log_prob marginal_gold_log_probs = joint_gold_log_prob.logsumexp(1) masked_lm_loss = -torch.nansum(torch.sum(marginal_gold_log_probs * mlm_mask) / torch.sum(mlm_mask)) if not return_dict: output = (prediction_scores,) + joint_outputs[2:4] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=joint_outputs.hidden_states, attentions=joint_outputs.attentions) @add_start_docstrings('The reader of REALM.', REALM_START_DOCSTRING) class RealmReader(RealmPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.realm = RealmBertModel(config) self.cls = RealmOnlyMLMHead(config) self.qa_outputs = RealmReaderProjection(config) self.post_init() @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('reader_beam_size, sequence_length')) @replace_return_docstrings(output_type=RealmReaderOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: 
Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, relevance_score: Optional[torch.FloatTensor]=None, block_mask: Optional[torch.BoolTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, has_answers: Optional[torch.BoolTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, RealmReaderOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if relevance_score is None: raise ValueError('You have to specify `relevance_score` to calculate logits and loss.') if block_mask is None: raise ValueError('You have to specify `block_mask` to separate question block and evidence block.') if token_type_ids.size(1) < self.config.max_span_width: raise ValueError('The input sequence length must be greater than or equal to config.max_span_width.') outputs = self.realm(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] (reader_logits, candidate_starts, candidate_ends) = self.qa_outputs(sequence_output, block_mask[0:self.config.reader_beam_size]) retriever_logits = torch.unsqueeze(relevance_score[0:self.config.reader_beam_size], -1) reader_logits += retriever_logits predicted_block_index = torch.argmax(torch.max(reader_logits, dim=1).values) predicted_candidate = torch.argmax(torch.max(reader_logits, dim=0).values) predicted_start = torch.index_select(candidate_starts, dim=0, index=predicted_candidate) predicted_end = torch.index_select(candidate_ends, dim=0, index=predicted_candidate) total_loss = None retriever_loss = None reader_loss = None retriever_correct = None reader_correct = None if start_positions is not None and end_positions is not None and (has_answers is not None): def compute_correct_candidates(candidate_starts, candidate_ends, gold_starts, gold_ends): is_gold_start = torch.eq(torch.unsqueeze(torch.unsqueeze(candidate_starts, 0), 0), torch.unsqueeze(gold_starts, -1)) is_gold_end = torch.eq(torch.unsqueeze(torch.unsqueeze(candidate_ends, 0), 0), torch.unsqueeze(gold_ends, -1)) return torch.any(torch.logical_and(is_gold_start, is_gold_end), 1) def marginal_log_loss(logits, is_correct): def mask_to_score(mask, dtype=torch.float32): return (1.0 - mask.type(dtype)) * torch.finfo(dtype).min log_numerator = torch.logsumexp(logits + mask_to_score(is_correct, dtype=logits.dtype), dim=-1) log_denominator = torch.logsumexp(logits, dim=-1) return log_denominator - log_numerator ignored_index = sequence_output.size(1) start_positions = start_positions.clamp(-1, ignored_index) end_positions = end_positions.clamp(-1, ignored_index) retriever_correct = has_answers any_retriever_correct = torch.any(retriever_correct) reader_correct = compute_correct_candidates(candidate_starts=candidate_starts, candidate_ends=candidate_ends, gold_starts=start_positions[0:self.config.reader_beam_size], gold_ends=end_positions[0:self.config.reader_beam_size]) any_reader_correct = torch.any(reader_correct) retriever_loss = marginal_log_loss(relevance_score, retriever_correct) reader_loss = marginal_log_loss(reader_logits.view(-1), reader_correct.view(-1)) retriever_loss *= any_retriever_correct.type(torch.float32) reader_loss *= 
any_reader_correct.type(torch.float32) total_loss = (retriever_loss + reader_loss).mean() if not return_dict: output = (predicted_block_index, predicted_candidate, predicted_start, predicted_end) + outputs[2:] return (total_loss, retriever_loss, reader_loss, retriever_correct, reader_correct) + output if total_loss is not None else output return RealmReaderOutput(loss=total_loss, retriever_loss=retriever_loss, reader_loss=reader_loss, retriever_correct=retriever_correct, reader_correct=reader_correct, block_idx=predicted_block_index, candidate=predicted_candidate, start_pos=predicted_start, end_pos=predicted_end, hidden_states=outputs.hidden_states, attentions=outputs.attentions) REALM_FOR_OPEN_QA_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token (should not be used in this model by design).\n\n [What are token type IDs?](../glossary#token-type-ids)\n answer_ids (`list` of shape `(num_answers, answer_length)`, *optional*):\n Answer ids for computing the marginal log-likelihood loss. 
Indices should be in `[-1, 0, ...,\n config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-1` are ignored (masked), the\n loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('`RealmForOpenQA` for end-to-end open domain question answering.', REALM_START_DOCSTRING) class RealmForOpenQA(RealmPreTrainedModel): def __init__(self, config, retriever=None): super().__init__(config) self.embedder = RealmEmbedder(config) self.reader = RealmReader(config) self.register_buffer('block_emb', torch.zeros(()).new_empty(size=(config.num_block_records, config.retriever_proj_size), dtype=torch.float32, device=torch.device('cpu'))) self.retriever = retriever self.post_init() @property def searcher_beam_size(self): if self.training: return self.config.searcher_beam_size return self.config.reader_beam_size def block_embedding_to(self, device): self.block_emb = self.block_emb.to(device) @add_start_docstrings_to_model_forward(REALM_FOR_OPEN_QA_DOCSTRING.format('1, sequence_length')) @replace_return_docstrings(output_type=RealmForOpenQAOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor], attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, answer_ids: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None) -> Union[Tuple, RealmForOpenQAOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and input_ids.shape[0] != 1: raise ValueError('The batch_size of the inputs must be 1.') question_outputs = self.embedder(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, return_dict=True) question_projection = question_outputs[0] batch_scores = torch.einsum('BD,QD->QB', self.block_emb, question_projection.to(self.block_emb.device)) (_, retrieved_block_ids) = torch.topk(batch_scores, k=self.searcher_beam_size, dim=-1) retrieved_block_ids = retrieved_block_ids.squeeze() retrieved_block_emb = torch.index_select(self.block_emb, dim=0, index=retrieved_block_ids) (has_answers, start_pos, end_pos, concat_inputs) = self.retriever(retrieved_block_ids.cpu(), input_ids, answer_ids, max_length=self.config.reader_seq_len) concat_inputs = concat_inputs.to(self.reader.device) block_mask = concat_inputs.special_tokens_mask.type(torch.bool).to(device=self.reader.device) block_mask.logical_not_().logical_and_(concat_inputs.token_type_ids.type(torch.bool)) if has_answers is not None: has_answers = torch.tensor(has_answers, dtype=torch.bool, device=self.reader.device) start_pos = torch.tensor(start_pos, dtype=torch.long, device=self.reader.device) end_pos = torch.tensor(end_pos, dtype=torch.long, device=self.reader.device) retrieved_logits = torch.einsum('D,BD->B', question_projection.squeeze(), retrieved_block_emb.to(self.reader.device)) reader_output = self.reader(input_ids=concat_inputs.input_ids[0:self.config.reader_beam_size], attention_mask=concat_inputs.attention_mask[0:self.config.reader_beam_size], token_type_ids=concat_inputs.token_type_ids[0:self.config.reader_beam_size], relevance_score=retrieved_logits, block_mask=block_mask, has_answers=has_answers, start_positions=start_pos, end_positions=end_pos, return_dict=True) predicted_block = concat_inputs.input_ids[reader_output.block_idx] predicted_answer_ids = 
predicted_block[reader_output.start_pos:reader_output.end_pos + 1] if not return_dict: return (reader_output, predicted_answer_ids) return RealmForOpenQAOutput(reader_output=reader_output, predicted_answer_ids=predicted_answer_ids) # File: transformers-main/src/transformers/models/deprecated/realm/retrieval_realm.py """""" import os from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from .... import AutoTokenizer from ....utils import logging _REALM_BLOCK_RECORDS_FILENAME = 'block_records.npy' logger = logging.get_logger(__name__) def convert_tfrecord_to_np(block_records_path: str, num_block_records: int) -> np.ndarray: import tensorflow.compat.v1 as tf blocks_dataset = tf.data.TFRecordDataset(block_records_path, buffer_size=512 * 1024 * 1024) blocks_dataset = blocks_dataset.batch(num_block_records, drop_remainder=True) np_record = next(blocks_dataset.take(1).as_numpy_iterator()) return np_record class ScaNNSearcher: def __init__(self, db, num_neighbors, dimensions_per_block=2, num_leaves=1000, num_leaves_to_search=100, training_sample_size=100000): from scann.scann_ops.py.scann_ops_pybind import builder as Builder builder = Builder(db=db, num_neighbors=num_neighbors, distance_measure='dot_product') builder = builder.tree(num_leaves=num_leaves, num_leaves_to_search=num_leaves_to_search, training_sample_size=training_sample_size) builder = builder.score_ah(dimensions_per_block=dimensions_per_block) self.searcher = builder.build() def search_batched(self, question_projection): (retrieved_block_ids, _) = self.searcher.search_batched(question_projection.detach().cpu()) return retrieved_block_ids.astype('int64') class RealmRetriever: def __init__(self, block_records, tokenizer): super().__init__() self.block_records = block_records self.tokenizer = tokenizer def __call__(self, retrieved_block_ids, question_input_ids, answer_ids, max_length=None, return_tensors='pt'): retrieved_blocks = np.take(self.block_records, indices=retrieved_block_ids, axis=0) question = self.tokenizer.decode(question_input_ids[0], skip_special_tokens=True) text = [] text_pair = [] for retrieved_block in retrieved_blocks: text.append(question) text_pair.append(retrieved_block.decode()) concat_inputs = self.tokenizer(text, text_pair, padding=True, truncation=True, return_special_tokens_mask=True, max_length=max_length) concat_inputs_tensors = concat_inputs.convert_to_tensors(return_tensors) if answer_ids is not None: return self.block_has_answer(concat_inputs, answer_ids) + (concat_inputs_tensors,) else: return (None, None, None, concat_inputs_tensors) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *init_inputs, **kwargs): if os.path.isdir(pretrained_model_name_or_path): block_records_path = os.path.join(pretrained_model_name_or_path, _REALM_BLOCK_RECORDS_FILENAME) else: block_records_path = hf_hub_download(repo_id=pretrained_model_name_or_path, filename=_REALM_BLOCK_RECORDS_FILENAME, **kwargs) block_records = np.load(block_records_path, allow_pickle=True) tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs) return cls(block_records, tokenizer) def save_pretrained(self, save_directory): np.save(os.path.join(save_directory, _REALM_BLOCK_RECORDS_FILENAME), self.block_records) self.tokenizer.save_pretrained(save_directory) def block_has_answer(self, concat_inputs, answer_ids): has_answers = [] start_pos = [] end_pos = [] max_answers = 0 for input_id in concat_inputs.input_ids: 
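# For each concatenated (question, evidence block) sequence, the loop body below locates the evidence
# span between the first and second [SEP] token and records the start/end indices of every exact match
# of a candidate answer; rows are later padded with -1 so that every example exposes `max_answers` spans.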
input_id_list = input_id.tolist() first_sep_idx = input_id_list.index(self.tokenizer.sep_token_id) second_sep_idx = first_sep_idx + 1 + input_id_list[first_sep_idx + 1:].index(self.tokenizer.sep_token_id) start_pos.append([]) end_pos.append([]) for answer in answer_ids: for idx in range(first_sep_idx + 1, second_sep_idx): if answer[0] == input_id_list[idx]: if input_id_list[idx:idx + len(answer)] == answer: start_pos[-1].append(idx) end_pos[-1].append(idx + len(answer) - 1) if len(start_pos[-1]) == 0: has_answers.append(False) else: has_answers.append(True) if len(start_pos[-1]) > max_answers: max_answers = len(start_pos[-1]) for (start_pos_, end_pos_) in zip(start_pos, end_pos): if len(start_pos_) < max_answers: padded = [-1] * (max_answers - len(start_pos_)) start_pos_ += padded end_pos_ += padded return (has_answers, start_pos, end_pos) # File: transformers-main/src/transformers/models/deprecated/realm/tokenization_realm.py """""" import collections import os import unicodedata from typing import List, Optional, Tuple from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ....tokenization_utils_base import BatchEncoding from ....utils import PaddingStrategy, logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} def load_vocab(vocab_file): vocab = collections.OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as reader: tokens = reader.readlines() for (index, token) in enumerate(tokens): token = token.rstrip('\n') vocab[token] = index return vocab def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens class RealmTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained model use `tokenizer = RealmTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def batch_encode_candidates(self, text, **kwargs): kwargs['padding'] = PaddingStrategy.MAX_LENGTH batch_text = text batch_text_pair = kwargs.pop('text_pair', None) return_tensors = kwargs.pop('return_tensors', None) output_data = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []} for (idx, candidate_text) in enumerate(batch_text): if batch_text_pair is not None: candidate_text_pair = batch_text_pair[idx] else: candidate_text_pair = None encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs) encoded_input_ids = encoded_candidates.get('input_ids') encoded_attention_mask = encoded_candidates.get('attention_mask') encoded_token_type_ids = encoded_candidates.get('token_type_ids') if encoded_input_ids is not None: output_data['input_ids'].append(encoded_input_ids) if encoded_attention_mask is not None: output_data['attention_mask'].append(encoded_attention_mask) if encoded_token_type_ids is not None: output_data['token_type_ids'].append(encoded_token_type_ids) output_data = {key: item for (key, item) in output_data.items() if len(item) != 0} return BatchEncoding(output_data, tensor_type=return_tensors) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, 
already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: for (token, token_index) in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,) class BasicTokenizer: def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents def tokenize(self, text, never_split=None): never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): if never_split is not None and text in never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return ''.join(output) def _is_chinese_char(self, cp): if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103): return True return False def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 65533 or _is_control(char): 
continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output) class WordpieceTokenizer: def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = ''.join(chars[start:end]) if start > 0: substr = '##' + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens # File: transformers-main/src/transformers/models/deprecated/realm/tokenization_realm_fast.py """""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_base import BatchEncoding from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} class RealmTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = RealmTokenizer def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars: normalizer_class = getattr(normalizers, normalizer_state.pop('type')) normalizer_state['lowercase'] = do_lower_case normalizer_state['strip_accents'] = strip_accents normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def batch_encode_candidates(self, text, **kwargs): kwargs['padding'] = PaddingStrategy.MAX_LENGTH batch_text = text batch_text_pair = kwargs.pop('text_pair', None) return_tensors = kwargs.pop('return_tensors', None) output_data = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []} for (idx, candidate_text) in enumerate(batch_text): if batch_text_pair is not None: candidate_text_pair = batch_text_pair[idx] else: candidate_text_pair = None encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs) encoded_input_ids = encoded_candidates.get('input_ids') encoded_attention_mask = encoded_candidates.get('attention_mask') encoded_token_type_ids = encoded_candidates.get('token_type_ids') if encoded_input_ids is not None: 
output_data['input_ids'].append(encoded_input_ids) if encoded_attention_mask is not None: output_data['attention_mask'].append(encoded_attention_mask) if encoded_token_type_ids is not None: output_data['token_type_ids'].append(encoded_token_type_ids) output_data = {key: item for (key, item) in output_data.items() if len(item) != 0} return BatchEncoding(output_data, tensor_type=return_tensors) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) # File: transformers-main/src/transformers/models/deprecated/retribert/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = {'configuration_retribert': ['RetriBertConfig'], 'tokenization_retribert': ['RetriBertTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_retribert_fast'] = ['RetriBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_retribert'] = ['RetriBertModel', 'RetriBertPreTrainedModel'] if TYPE_CHECKING: from .configuration_retribert import RetriBertConfig from .tokenization_retribert import RetriBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_retribert_fast import RetriBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_retribert import RetriBertModel, RetriBertPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/retribert/configuration_retribert.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class RetriBertConfig(PretrainedConfig): model_type = 'retribert' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = 
attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.share_encoders = share_encoders self.projection_dim = projection_dim # File: transformers-main/src/transformers/models/deprecated/retribert/modeling_retribert.py """""" import math from typing import Optional import torch import torch.utils.checkpoint as checkpoint from torch import nn from ....modeling_utils import PreTrainedModel from ....utils import add_start_docstrings, logging from ...bert.modeling_bert import BertModel from .configuration_retribert import RetriBertConfig logger = logging.get_logger(__name__) class RetriBertPreTrainedModel(PreTrainedModel): config_class = RetriBertConfig load_tf_weights = None base_model_prefix = 'retribert' def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) RETRIBERT_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`RetriBertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('Bert Based model to embed queries or document for document retrieval.', RETRIBERT_START_DOCSTRING) class RetriBertModel(RetriBertPreTrainedModel): def __init__(self, config: RetriBertConfig) -> None: super().__init__(config) self.projection_dim = config.projection_dim self.bert_query = BertModel(config) self.bert_doc = None if config.share_encoders else BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False) self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False) self.ce_loss = nn.CrossEntropyLoss(reduction='mean') self.post_init() def embed_sentences_checkpointed(self, input_ids, attention_mask, sent_encoder, checkpoint_batch_size=-1): if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size: return sent_encoder(input_ids, attention_mask=attention_mask)[1] else: device = input_ids.device input_shape = input_ids.size() token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) head_mask = [None] * sent_encoder.config.num_hidden_layers extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(attention_mask, input_shape) def partial_encode(*inputs): encoder_outputs = sent_encoder.encoder(inputs[0], attention_mask=inputs[1], head_mask=head_mask) sequence_output = encoder_outputs[0] pooled_output = sent_encoder.pooler(sequence_output) return pooled_output embedding_output = sent_encoder.embeddings(input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None) pooled_output_list = [] for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)): b_embedding_output = embedding_output[b * checkpoint_batch_size:(b + 1) * checkpoint_batch_size] b_attention_mask = extended_attention_mask[b * checkpoint_batch_size:(b + 1) * checkpoint_batch_size] pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask) pooled_output_list.append(pooled_output) return torch.cat(pooled_output_list, dim=0) def embed_questions(self, input_ids, attention_mask=None, checkpoint_batch_size=-1): q_reps = self.embed_sentences_checkpointed(input_ids, attention_mask, self.bert_query, checkpoint_batch_size) return self.project_query(q_reps) def embed_answers(self, input_ids, attention_mask=None, checkpoint_batch_size=-1): a_reps = self.embed_sentences_checkpointed(input_ids, attention_mask, self.bert_query if self.bert_doc is None else self.bert_doc, checkpoint_batch_size) return self.project_doc(a_reps) def forward(self, input_ids_query: torch.LongTensor, attention_mask_query: Optional[torch.FloatTensor], input_ids_doc: torch.LongTensor, attention_mask_doc: Optional[torch.FloatTensor], checkpoint_batch_size: int=-1) -> torch.FloatTensor: device = input_ids_query.device q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size) a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size) compare_scores = torch.mm(q_reps, a_reps.t()) loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device)) loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device)) loss = (loss_qa + loss_aq) / 2 return loss # File: transformers-main/src/transformers/models/deprecated/retribert/tokenization_retribert.py """""" import collections import os import unicodedata from 
typing import List, Optional, Tuple from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ....utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} def load_vocab(vocab_file): vocab = collections.OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as reader: tokens = reader.readlines() for (index, token) in enumerate(tokens): token = token.rstrip('\n') vocab[token] = index return vocab def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens class RetriBertTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text, split_special_tokens=False): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, 
already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: for (token, token_index) in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,) class BasicTokenizer: def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) unicode_normalized_text = unicodedata.normalize('NFC', text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return ''.join(output) def _is_chinese_char(self, cp): if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 
194560 and cp <= 195103): return True return False def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 65533 or _is_control(char): continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output) class WordpieceTokenizer: def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = ''.join(chars[start:end]) if start > 0: substr = '##' + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens # File: transformers-main/src/transformers/models/deprecated/retribert/tokenization_retribert_fast.py """""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} class RetriBertTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = RetriBertTokenizer model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars: normalizer_class = getattr(normalizers, normalizer_state.pop('type')) normalizer_state['lowercase'] = do_lower_case normalizer_state['strip_accents'] = strip_accents normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: 
Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) # File: transformers-main/src/transformers/models/deprecated/speech_to_text_2/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available _import_structure = {'configuration_speech_to_text_2': ['Speech2Text2Config'], 'processing_speech_to_text_2': ['Speech2Text2Processor'], 'tokenization_speech_to_text_2': ['Speech2Text2Tokenizer']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_speech_to_text_2'] = ['Speech2Text2ForCausalLM', 'Speech2Text2PreTrainedModel'] if TYPE_CHECKING: from .configuration_speech_to_text_2 import Speech2Text2Config from .processing_speech_to_text_2 import Speech2Text2Processor from .tokenization_speech_to_text_2 import Speech2Text2Tokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text_2 import Speech2Text2ForCausalLM, Speech2Text2PreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/speech_to_text_2/configuration_speech_to_text_2.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class Speech2Text2Config(PretrainedConfig): model_type = 'speech_to_text_2' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs): self.vocab_size = vocab_size self.d_model = d_model self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = decoder_layers self.scale_embedding = scale_embedding self.max_target_positions = max_target_positions super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs) # File: transformers-main/src/transformers/models/deprecated/speech_to_text_2/modeling_speech_to_text_2.py """""" import copy import math from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ....activations import ACT2FN from ....modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from ....modeling_utils import PreTrainedModel from ....utils import add_start_docstrings, logging, 
replace_return_docstrings from .configuration_speech_to_text_2 import Speech2Text2Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'Speech2Text2Config' _CHECKPOINT_FOR_DOC = 'facebook/s2t-wav2vec2-large-en-de' class Speech2Text2SinusoidalPositionalEmbedding(nn.Module): def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None): emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) if hasattr(self, 'weights'): emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.weights = nn.Parameter(emb_weights) self.weights.requires_grad = False self.weights.detach_() @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int]=None): half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int=0): (bsz, seq_len) = input_ids.size() position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(input_ids.device) max_pos = self.padding_idx + 1 + seq_len if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach() def create_position_ids_from_input_ids(self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int]=0): mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx class Speech2Text2Attention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[Speech2Text2Config]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, 
attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) class Speech2Text2DecoderLayer(nn.Module): def __init__(self, config: Speech2Text2Config): super().__init__() self.embed_dim = config.d_model self.self_attn = Speech2Text2Attention(embed_dim=self.embed_dim, 
num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) if config.is_decoder: self.encoder_attn = Speech2Text2Attention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True): residual = hidden_states self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None (hidden_states, cross_attn_weights, cross_attn_present_key_value) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class Speech2Text2PreTrainedModel(PreTrainedModel): config_class = Speech2Text2Config base_model_prefix = 'model' supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() 
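# A minimal, self-contained sketch (not part of the upstream file) illustrating the sinusoidal
# position scheme implemented by `Speech2Text2SinusoidalPositionalEmbedding` above: half of the
# embedding features are sine terms, half are cosine terms, and padding tokens keep `padding_idx`
# as their position id. The `_demo_*` names below are hypothetical helpers for illustration only.
import math  # already imported at the top of this module; repeated so the sketch stands on its own
import torch


def _demo_sinusoid_table(num_positions: int, embedding_dim: int) -> torch.Tensor:
    # Frequencies follow the usual geometric progression; the sin and cos halves are concatenated.
    half_dim = embedding_dim // 2
    freqs = torch.exp(torch.arange(half_dim, dtype=torch.float32) * (-math.log(10000.0) / (half_dim - 1)))
    angles = torch.arange(num_positions, dtype=torch.float32).unsqueeze(1) * freqs.unsqueeze(0)
    table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
    if embedding_dim % 2 == 1:
        # Pad odd dimensions with a zero column so the table width equals `embedding_dim`.
        table = torch.cat([table, torch.zeros(num_positions, 1)], dim=1)
    return table


def _demo_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    # Real tokens are numbered consecutively from `padding_idx + 1`; padding keeps `padding_idx`.
    mask = input_ids.ne(padding_idx).int()
    return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx


# Example with hypothetical values: for ids = torch.tensor([[0, 5, 7, 1, 1]]) and padding_idx=1,
# _demo_position_ids(ids, 1) returns tensor([[2, 3, 4, 1, 1]]), which indexes rows of the table.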
SPEECH_TO_TEXT_2_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`Speech2Text2Config`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' class Speech2Text2Decoder(Speech2Text2PreTrainedModel): def __init__(self, config: Speech2Text2Config): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_target_positions self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = Speech2Text2SinusoidalPositionalEmbedding(self.max_target_positions, config.d_model, self.padding_idx) self.layers = nn.ModuleList([Speech2Text2DecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if 
self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`...') use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None next_decoder_cache = () if use_cache else None for (attn_mask, mask_name) in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None) else: layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) @add_start_docstrings('The Speech2Text2 Model with a language modeling head. Can be used for summarization.', SPEECH_TO_TEXT_2_START_DOCSTRING) class Speech2Text2DecoderWrapper(Speech2Text2PreTrainedModel): def __init__(self, config): super().__init__(config) self.decoder = Speech2Text2Decoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) @add_start_docstrings('The Speech2Text2 Decoder with a language modeling head. 
Can be used as the decoder part of [`EncoderDecoderModel`] and [`SpeechEncoderDecoder`].', SPEECH_TO_TEXT_2_START_DOCSTRING) class Speech2Text2ForCausalLM(Speech2Text2PreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = Speech2Text2DecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], CausalLMOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs): if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} @staticmethod def _reorder_cache(past_key_values, beam_idx): 
reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/deprecated/speech_to_text_2/processing_speech_to_text_2.py """""" import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class Speech2Text2Processor(ProcessorMixin): feature_extractor_class = 'AutoFeatureExtractor' tokenizer_class = 'Speech2Text2Tokenizer' def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) self.current_processor = self.feature_extractor self._in_target_context_manager = False def __call__(self, *args, **kwargs): if self._in_target_context_manager: return self.current_processor(*args, **kwargs) if 'raw_speech' in kwargs: warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.') audio = kwargs.pop('raw_speech') else: audio = kwargs.pop('audio', None) sampling_rate = kwargs.pop('sampling_rate', None) text = kwargs.pop('text', None) if len(args) > 0: audio = args[0] args = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.') if audio is not None: inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif audio is None: return encodings else: inputs['labels'] = encodings['input_ids'] return inputs def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @contextmanager def as_target_processor(self): warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call).') self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.feature_extractor self._in_target_context_manager = False # File: transformers-main/src/transformers/models/deprecated/speech_to_text_2/tokenization_speech_to_text_2.py """""" import json import os from typing import Dict, List, Optional, Tuple from ....tokenization_utils import PreTrainedTokenizer from ....utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt'} BPE_TOKEN_MERGES = '</w>' BPE_TOKEN_VOCAB = '@@ ' def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class Speech2Text2Tokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, bos_token='<s>', pad_token='<pad>', eos_token='</s>', unk_token='<unk>', do_lower_case=False, merges_file=None, **kwargs): self.do_lower_case = do_lower_case with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for (k, v) in self.encoder.items()} if merges_file is None: logger.info(f'No merges files provided. 
{self.__class__.__name__} can only be used for decoding.') self.bpe_ranks = None self.cache = None else: with open(merges_file, encoding='utf-8') as merges_handle: merges = merges_handle.read().split('\n')[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs) @property def vocab_size(self) -> int: return len(self.decoder) def get_vocab(self) -> Dict: return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) if word == '\n ' + BPE_TOKEN_MERGES: word = '\n' + BPE_TOKEN_MERGES if word.endswith(BPE_TOKEN_MERGES): word = word.replace(BPE_TOKEN_MERGES, '') word = word.replace(' ', BPE_TOKEN_VOCAB) self.cache[token] = word return word def _tokenize(self, text): if self.bpe_ranks is None: raise ValueError('This tokenizer was instantiated without a `merges.txt` file, so that it can only be used for decoding, not for encoding. Make sure to provide `merges.txt` file at instantiation to enable encoding.') if self.do_lower_case: text = text.lower() text = text.split() split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(' '))) return split_tokens def _convert_token_to_id(self, token: str) -> int: return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index: int) -> str: result = self.decoder.get(index, self.unk_token) return result def convert_tokens_to_string(self, tokens: List[str]) -> str: string = ' '.join(tokens) string = ''.join(string.split(BPE_TOKEN_VOCAB)) return string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merges_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 if self.bpe_ranks is None: return (vocab_file,) with open(merges_file, 'w', encoding='utf-8') as writer: for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive. 
Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merges_file) # File: transformers-main/src/transformers/models/deprecated/tapex/tokenization_tapex.py """""" import json import os import random from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ....file_utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available from ....tokenization_utils import AddedToken, PreTrainedTokenizer from ....tokenization_utils_base import ENCODE_KWARGS_DOCSTRING, BatchEncoding, TextInput, TruncationStrategy from ....utils import logging if is_pandas_available(): import pandas as pd logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} class TapexTruncationStrategy(ExplicitEnum): DROP_ROWS_TO_FIT = 'drop_rows_to_fit' TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = "\n add_special_tokens (`bool`, *optional*, defaults to `True`):\n Whether or not to encode the sequences with the special tokens relative to their model.\n padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str`, [`TapexTruncationStrategy`] or [`~tokenization_utils_base.TruncationStrategy`],\n *optional*, defaults to `False`):\n\n Activates and controls truncation. Accepts the following values:\n\n - `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length` or to the\n maximum acceptable input length for the model if that argument is not provided. This will truncate\n row by row, removing rows from the table.\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or\n to the maximum acceptable input length for the model if that argument is not provided. This will\n truncate token by token, removing a token from the longest sequence in the pair if a pair of\n sequences (or a batch of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the\n maximum acceptable input length for the model if that argument is not provided. This will only\n truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the\n maximum acceptable input length for the model if that argument is not provided. This will only\n truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters. 
If left unset or set to\n `None`, this will use the predefined model maximum length if a maximum length is required by one of the\n truncation/padding parameters. If the model has no specific maximum input length (like XLNet)\n truncation/padding to a maximum length will be deactivated.\n stride (`int`, *optional*, defaults to 0):\n If set to a number along with `max_length`, the overflowing tokens returned when\n `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence\n returned to provide some overlap between truncated and overflowing sequences. The value of this\n argument defines the number of overlapping tokens.\n pad_to_multiple_of (`int`, *optional*):\n If set will pad the sequence to a multiple of the provided value. This is especially useful to enable\n the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).\n return_tensors (`str` or [`~file_utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n" @lru_cache() def bytes_to_unicode(): bs = list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1)) cs = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class IndexedRowTableLinearize: def process_table(self, table_content: Dict): assert 'header' in table_content and 'rows' in table_content, self.PROMPT_MESSAGE table_str = self.process_header(table_content['header']) + ' ' for (i, row_example) in enumerate(table_content['rows']): table_str += self.process_row(row_example, row_index=i + 1) + ' ' return table_str.strip() def process_header(self, headers: List): return 'col : ' + ' | '.join(headers) def process_row(self, row: List, row_index: int): row_str = '' row_cell_values = [] for cell_value in row: if isinstance(cell_value, int): row_cell_values.append(str(cell_value)) else: row_cell_values.append(cell_value) row_str += ' | '.join(row_cell_values) return 'row ' + str(row_index) + ' : ' + row_str class TapexTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, do_lower_case=True, errors='replace', bos_token='', eos_token='', sep_token='
', cls_token='', unk_token='', pad_token='', mask_token='', add_prefix_space=False, max_cell_length=15, **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for (k, v) in self.encoder.items()} self.errors = errors self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: bpe_merges = merges_handle.read().split('\n')[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space self.do_lower_case = do_lower_case self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+") super().__init__(vocab_file=vocab_file, merges_file=merges_file, do_lower_case=do_lower_case, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, max_cell_length=max_cell_length, **kwargs) self.max_cell_length = max_cell_length self.table_linearize = IndexedRowTableLinearize() def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and (not text[0].isspace())): text = ' ' + text return (text, kwargs) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): if token in self.cache: return 
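# A small illustration (an aside, not part of the original file) of how the `IndexedRowTableLinearize` class
# defined above flattens a table before BPE tokenization; the expected string follows directly from
# `process_header` and `process_row`. The example table values are arbitrary.
linearizer = IndexedRowTableLinearize()
table_content = {'header': ['year', 'city'], 'rows': [[2008, 'Beijing'], [2012, 'London']]}
print(linearizer.process_table(table_content))
# -> 'col : year | city row 1 : 2008 | Beijing row 2 : 2012 | London'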
self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def _tokenize(self, text): bpe_tokens = [] for token in re.findall(self.pat, text): token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8'))) bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' '))) return bpe_tokens def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index) def convert_tokens_to_string(self, tokens): text = ''.join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. 
Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__(self, table: Union['pd.DataFrame', List['pd.DataFrame']]=None, query: Optional[Union[TextInput, List[TextInput]]]=None, answer: Union[str, List[str]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: if table is not None: return self.source_call_func(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) elif answer is not None: return self.target_call_func(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) else: raise ValueError('You need to provide either a `table` or an `answer`.') def source_call_func(self, table: Union['pd.DataFrame', List['pd.DataFrame']], query: Optional[Union[TextInput, List[TextInput]]]=None, answer: Union[str, List[str]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: valid_table = False valid_query = False if isinstance(table, pd.DataFrame): valid_table = True elif isinstance(table, (list, tuple)) and isinstance(table[0], pd.DataFrame): valid_table = True if query is None or isinstance(query, str): valid_query = True elif isinstance(query, (list, tuple)): if len(query) == 0 or isinstance(query[0], str): valid_query = True if not valid_table: raise ValueError('table input must of type `pd.DataFrame` (single example), `List[pd.DataFrame]` (batch of examples). ') if not valid_query: raise ValueError('query input must of type `str` (single example), `List[str]` (batch of examples). 
') is_batched = isinstance(table, (list, tuple)) or isinstance(query, (list, tuple)) if is_batched: return self.batch_encode_plus(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) else: return self.encode_plus(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus(self, table: Union['pd.DataFrame', List['pd.DataFrame']], query: Optional[List[TextInput]]=None, answer: List[str]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: (padding_strategy, truncation_strategy, max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._batch_encode_plus(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def _batch_encode_plus(self, table: Union['pd.DataFrame', List['pd.DataFrame']], query: Optional[List[TextInput]]=None, answer: Optional[List[str]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: if return_offsets_mapping: raise 
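# Hedged usage sketch (an aside, not part of the original file): encoding a table plus a question, which
# exercises the `source_call_func` path above. 'microsoft/tapex-base' is assumed to be an available checkpoint;
# the TAPEX-specific 'drop_rows_to_fit' truncation documented earlier is not used here to keep the call minimal.
import pandas as pd
from transformers import TapexTokenizer
tokenizer = TapexTokenizer.from_pretrained('microsoft/tapex-base')  # assumed checkpoint
table = pd.DataFrame({'year': [2008, 2012], 'city': ['Beijing', 'London']})
encoding = tokenizer(table=table, query='where were the 2012 games held?', padding='max_length', truncation=True, max_length=64, return_tensors='pt')
print(encoding['input_ids'].shape)  # expected: torch.Size([1, 64])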
NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast.') if isinstance(table, pd.DataFrame) and isinstance(query, (list, tuple)): table = [table] * len(query) if isinstance(table, (list, tuple)) and isinstance(query, str): query = [query] * len(table) batch_outputs = self._batch_prepare_for_model(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose) return BatchEncoding(batch_outputs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model(self, table: Union['pd.DataFrame', List['pd.DataFrame']], query: Optional[Union[TextInput, List[TextInput]]]=None, answer: Optional[Union[str, List[str]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding: batch_outputs = {} if answer is None: answer = [None] * len(table) for (_table, _query, _answer) in zip(table, query, answer): text = self.prepare_table_query(_table, _query, _answer, truncation_strategy=truncation_strategy, max_length=max_length) if self.do_lower_case: text = text.lower() tokens = self.tokenize(text) outputs = self.prepare_for_model(ids=self.convert_tokens_to_ids(tokens), add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose) for (key, value) in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING) def encode(self, table: 'pd.DataFrame', query: Optional[TextInput]=None, answer: Optional[str]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy]=None, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> List[int]: encoded_inputs = self.encode_plus(table, query=query, answer=answer, 
add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, **kwargs) return encoded_inputs['input_ids'] @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus(self, table: 'pd.DataFrame', query: Optional[TextInput]=None, answer: Optional[str]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: (padding_strategy, truncation_strategy, max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._encode_plus(table=table, query=query, answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def _encode_plus(self, table: 'pd.DataFrame', query: Optional[TextInput]=None, answer: Optional[str]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast. 
More information on available tokenizers at https://github.com/huggingface/transformers/pull/2674') text = self.prepare_table_query(table, query, answer, truncation_strategy=truncation_strategy, max_length=max_length) if self.do_lower_case: text = text.lower() tokens = self.tokenize(text) return self.prepare_for_model(ids=self.convert_tokens_to_ids(tokens), add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose) def target_call_func(self, answer: Union[str, List[str]], add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: is_batched = isinstance(answer, (list, tuple)) if is_batched: return self.target_batch_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) else: return self.target_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def target_batch_encode_plus(self, answer: List[str], add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: (padding_strategy, truncation_strategy, max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._target_batch_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, 
max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def _target_batch_encode_plus(self, answer: List[str], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: batch_outputs = {} for text in answer: if self.do_lower_case: text = text.lower() tokens = self.tokenize(text) outputs = self.prepare_for_model(ids=self.convert_tokens_to_ids(tokens), add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose) for (key, value) in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return BatchEncoding(batch_outputs) def target_encode(self, answer: str, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy]=None, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> List[int]: encoded_outputs = self.target_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, **kwargs) return encoded_outputs['input_ids'] def target_encode_plus(self, answer: str, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str]=None, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: (padding_strategy, truncation_strategy, max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs) return self._target_encode_plus(answer=answer, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, 
max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs) def _target_encode_plus(self, answer: str, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding: if return_offsets_mapping: raise NotImplementedError('return_offset_mapping is not available when using Python tokenizers. To use this feature, change your tokenizer to one deriving from transformers.PreTrainedTokenizerFast. More information on available tokenizers at https://github.com/huggingface/transformers/pull/2674') text = answer if self.do_lower_case: text = text.lower() tokens = self.tokenize(text) return self.prepare_for_model(ids=self.convert_tokens_to_ids(tokens), add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose) def prepare_table_query(self, table, query, answer=None, truncation_strategy=Union[str, TruncationStrategy, TapexTruncationStrategy], max_length=None): if not table.empty: table_content = {'header': list(table.columns), 'rows': [list(row.values) for (i, row) in table.iterrows()]} self.truncate_table_cells(table_content, query, answer) if truncation_strategy == TapexTruncationStrategy.DROP_ROWS_TO_FIT: self.truncate_table_rows(table_content, query, answer, max_length=max_length) linear_table = self.table_linearize.process_table(table_content) else: linear_table = '' if linear_table == '': logger.warning('You provide an empty table, or all cells contain much tokens (e.g., >= 1024 tokens). 
' + f'Please carefully check the corresponding table with the query : {query}.') if query == '': logger.warning('You provide nothing to query with respect to the table.') separator = ' ' if query and linear_table else '' joint_input = query + separator + linear_table if query else linear_table return joint_input def truncate_table_cells(self, table_content: Dict, question: str, answer: List): cell_mapping = {} for row in table_content['rows']: for (i, cell) in enumerate(row): truncate_cell = self.truncate_cell(cell) if truncate_cell is not None: cell_mapping[cell] = truncate_cell row[i] = truncate_cell if answer is not None: for (i, case) in enumerate(answer): if case in cell_mapping.keys(): answer[i] = cell_mapping[case] def truncate_cell(self, cell_value): if isinstance(cell_value, int) or isinstance(cell_value, float): return cell_value if cell_value.strip() != '': try_tokens = self.tokenize(cell_value) if len(try_tokens) >= self.max_cell_length: retain_tokens = try_tokens[:self.max_cell_length] retain_cell_value = self.convert_tokens_to_string(retain_tokens) return retain_cell_value else: return None else: return cell_value def truncate_table_rows(self, table_content: Dict, question: str, answer: Optional[Union[str, List[str]]]=None, max_length=None): (delete_ratio, remain_token_len) = self.estimate_delete_ratio(table_content, question, max_length) self.delete_unrelated_rows(table_content, question, answer, delete_ratio) maximum_keep_rows = 0 for (ind, row_example) in enumerate(table_content['rows']): value_string = self.table_linearize.process_row(row_example, ind + 1) value_token_len = len(self.tokenize(value_string)) if value_token_len > remain_token_len: break remain_token_len -= value_token_len maximum_keep_rows += 1 del table_content['rows'][maximum_keep_rows:] def estimate_delete_ratio(self, table_content: Dict, question: str, max_length=None): if 'header' not in table_content or 'rows' not in table_content: raise ValueError("The table content should contain both 'header' and 'rows' keys.") question_tokens = self.tokenize(question, add_special_tokens=True) header_string = self.table_linearize.process_header(table_content['header']) header_tokens = self.tokenize(header_string, add_special_tokens=False) used_token_len = len(question_tokens) + len(header_tokens) remain_token_len = max_length - used_token_len value_string = '' for (_, row_example) in enumerate(table_content['rows']): value_string += self.table_linearize.process_row(row_example, 100) + ' ' value_token_len = len(self.tokenize(value_string)) if value_token_len < remain_token_len: return (0.0, remain_token_len) else: return (1.0 - remain_token_len / value_token_len, remain_token_len) def delete_unrelated_rows(self, table_content: Dict, question: str, answer: List, delete_ratio: float): truncated_unrelated_indices = [] related_indices = [] if answer is None or len(answer) == 0: answer_set = set() else: answer_set = {ans_ex.lower() for ans_ex in answer} if question is not None: answer_set.update(question.split()) question_set = set(question.strip('?!.,').split(' ')) row_max_len = len(table_content['rows']) for (_row_idx, row) in enumerate(table_content['rows']): lower_row = {str(cell).lower() for cell in row} if len(lower_row & answer_set) == 0 and len(lower_row & question_set) == 0: truncated_unrelated_indices.append(_row_idx) else: related_indices.extend([_row_idx - 2, _row_idx - 1, _row_idx, _row_idx + 1, _row_idx + 2]) truncated_unrelated_indices = [_row_idx for _row_idx in truncated_unrelated_indices if _row_idx not 
in related_indices] drop_items = min(len(truncated_unrelated_indices), int(len(table_content['rows']) * delete_ratio)) drop_row_indices = random.choices(truncated_unrelated_indices, k=drop_items) for _row_idx in reversed(range(row_max_len)): if _row_idx in drop_row_indices: del table_content['rows'][_row_idx] if 'id' in table_content and len(drop_row_indices) > 0: logger.warning('Delete {:.2f} rows in table {}'.format(len(drop_row_indices), table_content['id'])) # File: transformers-main/src/transformers/models/deprecated/trajectory_transformer/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_trajectory_transformer': ['TrajectoryTransformerConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_trajectory_transformer'] = ['TrajectoryTransformerModel', 'TrajectoryTransformerPreTrainedModel', 'load_tf_weights_in_trajectory_transformer'] if TYPE_CHECKING: from .configuration_trajectory_transformer import TrajectoryTransformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class TrajectoryTransformerConfig(PretrainedConfig): model_type = 'trajectory_transformer' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'} def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs): self.vocab_size = vocab_size self.action_weight = action_weight self.reward_weight = reward_weight self.value_weight = value_weight self.max_position_embeddings = max_position_embeddings self.block_size = block_size self.action_dim = action_dim self.observation_dim = observation_dim self.transition_dim = transition_dim self.learning_rate = learning_rate self.n_layer = n_layer self.n_head = n_head self.n_embd = n_embd self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.resid_pdrop = resid_pdrop self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.kaiming_initializer_range = kaiming_initializer_range self.use_cache = use_cache super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) # File: transformers-main/src/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py """""" import torch import trajectory.utils as utils from transformers import 
TrajectoryTransformerModel class Parser(utils.Parser): dataset: str = 'halfcheetah-medium-expert-v2' config: str = 'config.offline' def convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch(logbase, dataset, loadpath, epoch, device): (gpt, gpt_epoch) = utils.load_model(logbase, dataset, loadpath, epoch=epoch, device=device) trajectory_transformer = TrajectoryTransformerModel(gpt.config) trajectory_transformer.tok_emb.load_state_dict(gpt.tok_emb.state_dict()) trajectory_transformer.pos_emb = gpt.pos_emb trajectory_transformer.drop.load_state_dict(gpt.drop.state_dict()) trajectory_transformer.ln_f.load_state_dict(gpt.ln_f.state_dict()) trajectory_transformer.head.load_state_dict(gpt.head.state_dict()) for (i, block) in enumerate(gpt.blocks): trajectory_transformer.blocks[i].ln1.load_state_dict(gpt.blocks[i].ln1.state_dict()) trajectory_transformer.blocks[i].ln2.load_state_dict(gpt.blocks[i].ln2.state_dict()) trajectory_transformer.blocks[i].attn.load_state_dict(gpt.blocks[i].attn.state_dict()) trajectory_transformer.blocks[i].l1.load_state_dict(gpt.blocks[i].mlp[0].state_dict()) trajectory_transformer.blocks[i].act.load_state_dict(gpt.blocks[i].mlp[1].state_dict()) trajectory_transformer.blocks[i].l2.load_state_dict(gpt.blocks[i].mlp[2].state_dict()) trajectory_transformer.blocks[i].drop.load_state_dict(gpt.blocks[i].mlp[3].state_dict()) torch.save(trajectory_transformer.state_dict(), 'pytorch_model.bin') if __name__ == '__main__': '' args = Parser().parse_args('plan') convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch(args.logbase, args.dataset, args.gpt_loadpath, args.gpt_epoch, args.device) # File: transformers-main/src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py """""" import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import functional as F from ....modeling_utils import PreTrainedModel from ....utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_trajectory_transformer import TrajectoryTransformerConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'CarlCochet/trajectory-transformer-halfcheetah-medium-v2' _CONFIG_FOR_DOC = 'TrajectoryTransformerConfig' def load_tf_weights_in_trajectory_transformer(model, config, tf_checkpoint_path): try: import re import numpy as np import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}') array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for (name, array) in zip(names, arrays): name = name.split('/') if any((n in ['adam_v', 'adam_m', 'AdamWeightDecayOptimizer', 'AdamWeightDecayOptimizer_1', 'global_step'] for n in name)): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch('[A-Za-z]+_\\d+', m_name): scope_names = re.split('_(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'kernel' or scope_names[0] == 'gamma': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': pointer = getattr(pointer, 'bias') elif scope_names[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched') except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f'Initialize PyTorch weight {name}') pointer.data = torch.from_numpy(array) return model @dataclass class TrajectoryTransformerOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class TrajectoryTransformerPreTrainedModel(PreTrainedModel): config_class = TrajectoryTransformerConfig load_tf_weights = load_tf_weights_in_trajectory_transformer base_model_prefix = 'trajectory_transformer' main_input_name = 'trajectories' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, EinLinear): for i in range(module.n_models): nn.init.kaiming_uniform_(module.weight[i], a=math.sqrt(5) / self.config.kaiming_initializer_range) if module.bias is not None: (fan_in, _) = nn.init._calculate_fan_in_and_fan_out(module.weight[i]) bound = 1 / math.sqrt(fan_in) * self.config.initializer_range nn.init.uniform_(module.bias[i], -bound, bound) TRAJECTORY_TRANSFORMER_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. 
Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`TrajectoryTransformerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING = '\n Args:\n trajectories (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Batch of trajectories, where a trajectory is a sequence of states, actions and rewards.\n past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`, *optional*):\n Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have\n their past given to this model should not be passed as `input_ids` as they have already been computed.\n targets (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Desired targets used to compute the loss.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class EinLinear(nn.Module): def __init__(self, n_models, in_features, out_features, bias): super().__init__() self.n_models = n_models self.out_features = out_features self.in_features = in_features self.weight = nn.Parameter(torch.Tensor(n_models, out_features, in_features)) if bias: self.bias = nn.Parameter(torch.Tensor(n_models, out_features)) else: self.register_parameter('bias', None) def reset_parameters(self): for i in range(self.n_models): nn.init.kaiming_uniform_(self.weight[i], a=math.sqrt(5)) if self.bias is not None: (fan_in, _) = nn.init._calculate_fan_in_and_fan_out(self.weight[i]) bound = 1 / math.sqrt(fan_in) nn.init.uniform_(self.bias[i], -bound, bound) def forward(self, input): output = torch.einsum('eoi,bei->beo', self.weight, input) if self.bias is not None: raise RuntimeError() return output class CausalSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.n_embd % config.n_head != 0: raise ValueError(f'n_head ({config.n_head}) should be a divisor of n_embd ({config.n_embd})') self.key = nn.Linear(config.n_embd, config.n_embd) self.query = nn.Linear(config.n_embd, config.n_embd) self.value = nn.Linear(config.n_embd, config.n_embd) self.attn_drop = nn.Dropout(config.attn_pdrop) self.resid_drop = nn.Dropout(config.resid_pdrop) self.proj = nn.Linear(config.n_embd, config.n_embd) self.register_buffer('mask', torch.tril(torch.ones(config.block_size, config.block_size)).view(1, 1, config.block_size, config.block_size), persistent=False) joined_dim = config.observation_dim + config.action_dim + 2 self.mask.squeeze()[:, joined_dim - 1::joined_dim] = 0 self.n_head = config.n_head def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False): (batch_size, sequence_length, embedding_dim) = hidden_states.size() key = self.key(hidden_states).view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head).transpose(1, 2) query = self.query(hidden_states).view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head).transpose(1, 2) value = self.value(hidden_states).view(batch_size, sequence_length, self.n_head, embedding_dim // self.n_head).transpose(1, 2) if layer_past is not None: (past_key, past_value) = layer_past key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: present = (key, value) else: present = None attn_weights = torch.matmul(query, key.transpose(-2, -1)) * (1.0 / math.sqrt(key.size(-1))) attn_weights = attn_weights.masked_fill(self.mask[:, :, :sequence_length, :sequence_length] == 0, torch.finfo(attn_weights.dtype).min) attn_weights = F.softmax(attn_weights, dim=-1) self._attn_map = attn_weights.clone() attn_weights = self.attn_drop(attn_weights) output = torch.matmul(attn_weights, value) output = output.transpose(1, 2).contiguous().view(batch_size, sequence_length, embedding_dim) output = self.resid_drop(self.proj(output)) outputs = (output, present) if output_attentions: outputs += (attn_weights,) return outputs class Block(nn.Module): def __init__(self, config): super().__init__() self.ln1 = nn.LayerNorm(config.n_embd) self.ln2 = nn.LayerNorm(config.n_embd) self.attn = CausalSelfAttention(config) self.l1 = nn.Linear(config.n_embd, 4 * config.n_embd) 
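# A small illustration (an aside, not part of the original file) of the attention mask built in
# CausalSelfAttention above: on top of the causal lower-triangular structure, every column that holds a
# transition's value token (index joined_dim - 1, 2 * joined_dim - 1, ...) is zeroed, so later positions
# never attend to previous value estimates. Toy dimensions below are chosen only for readability.
import torch
block_size, observation_dim, action_dim = 8, 2, 1
joined_dim = observation_dim + action_dim + 2  # observation + action + reward + value
mask = torch.tril(torch.ones(block_size, block_size))
mask[:, joined_dim - 1::joined_dim] = 0  # mirrors `self.mask.squeeze()[:, joined_dim - 1::joined_dim] = 0`
print(mask)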
self.act = nn.GELU() self.l2 = nn.Linear(4 * config.n_embd, config.n_embd) self.drop = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False): residual = hidden_states hidden_states = self.ln1(hidden_states) attn_outputs = self.attn(hidden_states, layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions) attn_output = attn_outputs[0] outputs = attn_outputs[1:] hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln2(hidden_states) hidden_states = self.l1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.l2(hidden_states) hidden_states = residual + self.drop(hidden_states) if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs @add_start_docstrings('The bare TrajectoryTransformer Model transformer outputting raw hidden-states without any specific head on top.', TRAJECTORY_TRANSFORMER_START_DOCSTRING) class TrajectoryTransformerModel(TrajectoryTransformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.tok_emb = nn.Embedding(config.vocab_size * config.transition_dim + 1, config.n_embd) self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd)) self.drop = nn.Dropout(config.embd_pdrop) self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)]) self.ln_f = nn.LayerNorm(config.n_embd) self.head = EinLinear(config.transition_dim, config.n_embd, config.vocab_size + 1, bias=False) self.vocab_size = config.vocab_size self.stop_token = config.vocab_size * config.transition_dim self.block_size = config.block_size self.observation_dim = config.observation_dim self.action_dim = config.action_dim self.transition_dim = config.transition_dim self.embedding_dim = config.n_embd self.action_weight = config.action_weight self.reward_weight = config.reward_weight self.value_weight = config.value_weight self.gradient_checkpointing = False self.post_init() def get_block_size(self): return self.block_size def offset_tokens(self, trajectories): (_, sequence_length) = trajectories.shape n_states = int(np.ceil(sequence_length / self.transition_dim)) offsets = torch.arange(self.transition_dim) * self.vocab_size offsets = offsets.repeat(n_states).to(trajectories.device) offset_trajectories = trajectories + offsets[:sequence_length] offset_trajectories[trajectories == self.vocab_size] = self.stop_token return offset_trajectories def pad_to_full_observation(self, hidden_states): (batch_size, sequence_length, _) = hidden_states.shape n_pad = (self.transition_dim - sequence_length % self.transition_dim) % self.transition_dim padding = torch.zeros(batch_size, n_pad, self.embedding_dim, device=hidden_states.device) hidden_states_pad = torch.cat([hidden_states, padding], dim=1) hidden_states_pad = hidden_states_pad.view(-1, self.transition_dim, self.embedding_dim) return (hidden_states_pad, n_pad) @add_start_docstrings_to_model_forward(TRAJECTORY_TRANSFORMER_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TrajectoryTransformerOutput, config_class=_CONFIG_FOR_DOC) def forward(self, trajectories: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, targets: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, 
output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], TrajectoryTransformerOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states if past_key_values is None: past_key_values = tuple([None] * len(self.blocks)) (batch_size, sequence_length) = trajectories.size() if sequence_length > self.block_size: raise ValueError('Cannot forward, model block size is exhausted.') offset_trajectories = self.offset_tokens(trajectories) token_embeddings = self.tok_emb(offset_trajectories) position_embeddings = self.pos_emb[:, :sequence_length, :] hidden_states = self.drop(token_embeddings + position_embeddings) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, (block, layer_past)) in enumerate(zip(self.blocks, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, layer_past, use_cache, output_attentions) else: outputs = block(hidden_states, layer_past, use_cache, output_attentions) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_state = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) (hidden_states_pad, n_pad) = self.pad_to_full_observation(hidden_state) logits = self.head(hidden_states_pad) logits = logits.reshape(batch_size, sequence_length + n_pad, self.vocab_size + 1) logits = logits[:, :sequence_length] if targets is not None: loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), targets.view(-1), reduction='none') if self.action_weight != 1 or self.reward_weight != 1 or self.value_weight != 1: n_states = int(np.ceil(sequence_length / self.transition_dim)) weights = torch.cat([torch.ones(self.observation_dim, device=trajectories.device), torch.ones(self.action_dim, device=trajectories.device) * self.action_weight, torch.ones(1, device=trajectories.device) * self.reward_weight, torch.ones(1, device=trajectories.device) * self.value_weight]) weights = weights.repeat(n_states) weights = weights[1:].repeat(batch_size, 1) loss = loss * weights.view(-1) loss = (loss * attention_mask.view(-1)).mean() else: loss = None if not return_dict: return tuple((v for v in [loss, logits, presents, all_hidden_states, all_self_attentions] if v is not None)) return TrajectoryTransformerOutput(loss=loss, logits=logits, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions) # File: transformers-main/src/transformers/models/deprecated/transfo_xl/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _import_structure = {'configuration_transfo_xl': ['TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer']} try: if not 
is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_transfo_xl'] = ['AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_transfo_xl'] = ['TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel'] if TYPE_CHECKING: from .configuration_transfo_xl import TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class TransfoXLConfig(PretrainedConfig): model_type = 'transfo-xl' keys_to_ignore_at_inference = ['mems'] attribute_map = {'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'} def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init='normal', init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-05, eos_token_id=0, **kwargs): self.vocab_size = vocab_size self.cutoffs = [] self.cutoffs.extend(cutoffs) if proj_share_all_but_first: self.tie_projs = [False] + [True] * len(self.cutoffs) else: self.tie_projs = [False] + [False] * len(self.cutoffs) self.d_model = d_model self.d_embed = d_embed self.d_head = d_head self.d_inner = d_inner self.div_val = div_val self.pre_lnorm = pre_lnorm self.n_layer = n_layer self.n_head = n_head self.mem_len = mem_len self.same_length = same_length self.attn_type = attn_type self.clamp_len = clamp_len self.sample_softmax = sample_softmax self.adaptive = adaptive self.dropout = dropout self.dropatt = dropatt self.untie_r = untie_r self.init = init self.init_range = init_range self.proj_init_std = proj_init_std self.init_std = init_std self.layer_norm_epsilon = layer_norm_epsilon super().__init__(eos_token_id=eos_token_id, **kwargs) @property def max_position_embeddings(self): logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.') return -1 @max_position_embeddings.setter def max_position_embeddings(self, value): raise 
NotImplementedError(f'The model {self.model_type} is one of the few models that has no sequence length limit.') # File: transformers-main/src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py """""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.deprecated.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() data_utils.Vocab = data_utils.TransfoXLTokenizer data_utils.Corpus = data_utils.TransfoXLCorpus sys.modules['data_utils'] = data_utils sys.modules['vocabulary'] = data_utils def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file): if transfo_xl_dataset_file: with open(transfo_xl_dataset_file, 'rb') as fp: corpus = pickle.load(fp, encoding='latin1') pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(f'Save vocabulary to {pytorch_vocab_dump_path}') corpus_vocab_dict = corpus.vocab.__dict__ torch.save(corpus_vocab_dict, pytorch_vocab_dump_path) corpus_dict_no_vocab = corpus.__dict__ corpus_dict_no_vocab.pop('vocab', None) pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME print(f'Save dataset to {pytorch_dataset_dump_path}') torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path) if tf_checkpoint_path: config_path = os.path.abspath(transfo_xl_config_file) tf_path = os.path.abspath(tf_checkpoint_path) print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.') if transfo_xl_config_file == '': config = TransfoXLConfig() else: config = TransfoXLConfig.from_json_file(transfo_xl_config_file) print(f'Building PyTorch model from configuration: {config}') model = TransfoXLLMHeadModel(config) model = load_tf_weights_in_transfo_xl(model, config, tf_path) pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME) pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME) print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}') torch.save(model.state_dict(), pytorch_weights_dump_path) print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}') with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f: f.write(config.to_json_string()) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the folder to store the PyTorch model or dataset/vocab.') parser.add_argument('--tf_checkpoint_path', default='', type=str, help='An optional path to a TensorFlow checkpoint path to be converted.') parser.add_argument('--transfo_xl_config_file', default='', type=str, help='An optional config json file corresponding to the pre-trained BERT model. 
\nThis specifies the model architecture.') parser.add_argument('--transfo_xl_dataset_file', default='', type=str, help='An optional dataset file to be converted in a vocabulary.') args = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch(args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file) # File: transformers-main/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py """""" from __future__ import annotations from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf from ....modeling_tf_utils import TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs from ....tf_utils import shape_list, stable_softmax from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_transfo_xl import TransfoXLConfig from .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'transfo-xl/transfo-xl-wt103' _CONFIG_FOR_DOC = 'TransfoXLConfig' class TFPositionalEmbedding(keras.layers.Layer): def __init__(self, demb, **kwargs): super().__init__(**kwargs) self.inv_freq = 1 / 10000 ** (tf.range(0, demb, 2.0) / demb) def call(self, pos_seq, bsz=None): self.inv_freq = tf.cast(self.inv_freq, dtype=pos_seq.dtype) sinusoid_inp = tf.einsum('i,j->ij', pos_seq, self.inv_freq) pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1) if bsz is not None: return tf.tile(pos_emb[:, None, :], [1, bsz, 1]) else: return pos_emb[:, None, :] class TFPositionwiseFF(keras.layers.Layer): def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-05, init_std=0.02, **kwargs): super().__init__(**kwargs) self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.layer_1 = keras.layers.Dense(d_inner, kernel_initializer=get_initializer(init_std), activation=tf.nn.relu, name='CoreNet_._0') self.drop_1 = keras.layers.Dropout(dropout) self.layer_2 = keras.layers.Dense(d_model, kernel_initializer=get_initializer(init_std), name='CoreNet_._3') self.drop_2 = keras.layers.Dropout(dropout) self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name='layer_norm') self.pre_lnorm = pre_lnorm def call(self, inp, training=False): if self.pre_lnorm: core_out = self.layer_norm(inp) core_out = self.layer_1(core_out) core_out = self.drop_1(core_out, training=training) core_out = self.layer_2(core_out) core_out = self.drop_2(core_out, training=training) output = core_out + inp else: core_out = self.layer_1(inp) core_out = self.drop_1(core_out, training=training) core_out = self.layer_2(core_out) core_out = self.drop_2(core_out, training=training) output = self.layer_norm(inp + core_out) return output class TFRelPartialLearnableMultiHeadAttn(keras.layers.Layer): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.0, pre_lnorm=False, r_r_bias=None, r_w_bias=None, layer_norm_epsilon=1e-05, init_std=0.02, output_attentions=False, **kwargs): super().__init__(**kwargs) self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.output_attentions = output_attentions self.qkv_net = keras.layers.Dense(3 * n_head * d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name='qkv_net') self.drop = keras.layers.Dropout(dropout) self.dropatt = 
keras.layers.Dropout(dropatt) self.o_net = keras.layers.Dense(d_model, kernel_initializer=get_initializer(init_std), use_bias=False, name='o_net') self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name='layer_norm') self.scale = 1 / d_head ** 0.5 self.pre_lnorm = pre_lnorm if r_r_bias is not None and r_w_bias is not None: self.r_r_bias = r_r_bias self.r_w_bias = r_w_bias else: self.r_r_bias = None self.r_w_bias = None self.r_net = keras.layers.Dense(self.n_head * self.d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name='r_net') def build(self, input_shape): if self.r_r_bias is None or self.r_w_bias is None: self.r_r_bias = self.add_weight(shape=(self.n_head, self.d_head), initializer='zeros', trainable=True, name='r_r_bias') self.r_w_bias = self.add_weight(shape=(self.n_head, self.d_head), initializer='zeros', trainable=True, name='r_w_bias') super().build(input_shape) def _rel_shift(self, x): x_size = shape_list(x) x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]]) x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]]) x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1]) x = tf.reshape(x, x_size) return x def call(self, w, r, attn_mask, mems, head_mask, output_attentions, training=False): (qlen, rlen, bsz) = (shape_list(w)[0], shape_list(r)[0], shape_list(w)[1]) if mems is not None: mems = tf.cast(mems, dtype=w.dtype) cat = tf.concat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) r_head_k = self.r_net(r) (w_head_q, w_head_k, w_head_v) = tf.split(w_heads, 3, axis=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) r_head_k = self.r_net(r) (w_head_q, w_head_k, w_head_v) = tf.split(w_heads, 3, axis=-1) klen = shape_list(w_head_k)[0] w_head_q = tf.reshape(w_head_q, (qlen, bsz, self.n_head, self.d_head)) w_head_k = tf.reshape(w_head_k, (klen, bsz, self.n_head, self.d_head)) w_head_v = tf.reshape(w_head_v, (klen, bsz, self.n_head, self.d_head)) r_head_k = tf.reshape(r_head_k, (rlen, self.n_head, self.d_head)) rw_head_q = w_head_q + self.r_w_bias AC = tf.einsum('ibnd,jbnd->ijbn', rw_head_q, w_head_k) rr_head_q = w_head_q + self.r_r_bias BD = tf.einsum('ibnd,jnd->ijbn', rr_head_q, r_head_k) BD = self._rel_shift(BD) attn_score = AC + BD attn_score = attn_score * self.scale if attn_mask is not None: attn_mask_t = attn_mask[:, :, None, None] attn_mask_t = tf.cast(attn_mask_t, dtype=attn_score.dtype) attn_score = attn_score * (1.0 - attn_mask_t) - 1e+30 * attn_mask_t attn_prob = stable_softmax(attn_score, axis=1) attn_prob = self.dropatt(attn_prob, training=training) if head_mask is not None: attn_prob = attn_prob * head_mask attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, w_head_v) attn_vec_sizes = shape_list(attn_vec) attn_vec = tf.reshape(attn_vec, (attn_vec_sizes[0], attn_vec_sizes[1], self.n_head * self.d_head)) attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out, training=training) if self.pre_lnorm: outputs = [w + attn_out] else: outputs = [self.layer_norm(w + attn_out)] if output_attentions: outputs.append(attn_prob) return outputs class TFRelPartialLearnableDecoderLayer(keras.layers.Layer): def __init__(self, n_head, d_model, d_head, d_inner, dropout, dropatt=0.0, pre_lnorm=False, r_w_bias=None, r_r_bias=None, layer_norm_epsilon=1e-05, init_std=0.02, output_attentions=False, **kwargs): super().__init__(**kwargs) self.dec_attn = TFRelPartialLearnableMultiHeadAttn(n_head, d_model, 
d_head, dropout, dropatt=dropatt, pre_lnorm=pre_lnorm, r_w_bias=r_w_bias, r_r_bias=r_r_bias, init_std=init_std, layer_norm_epsilon=layer_norm_epsilon, output_attentions=output_attentions, name='dec_attn') self.pos_ff = TFPositionwiseFF(d_model, d_inner, dropout, pre_lnorm=pre_lnorm, init_std=init_std, layer_norm_epsilon=layer_norm_epsilon, name='pos_ff') def call(self, dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=False): attn_outputs = self.dec_attn(dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=training) ff_output = self.pos_ff(attn_outputs[0], training=training) outputs = [ff_output] + attn_outputs[1:] return outputs class TFTransfoEmbeddings(keras.layers.Layer): def __init__(self, vocab_size, emb_size, init_std, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.emb_size = emb_size self.init_std = init_std def build(self, input_shape): self.weight = self.add_weight(shape=(self.vocab_size, self.emb_size), initializer=get_initializer(self.init_std), name='embeddings') super().build(input_shape) def call(self, inputs): return tf.gather(self.weight, inputs) class TFAdaptiveEmbedding(keras.layers.Layer): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, init_std=0.02, sample_softmax=False, **kwargs): super().__init__(**kwargs) self.n_token = n_token self.d_embed = d_embed self.init_std = init_std self.cutoffs = cutoffs + [n_token] self.div_val = div_val self.d_proj = d_proj self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = [] self.emb_projs = [] if div_val == 1: raise NotImplementedError else: for i in range(len(self.cutoffs)): (l_idx, r_idx) = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) d_emb_i = d_embed // div_val ** i self.emb_layers.append(TFTransfoEmbeddings(r_idx - l_idx, d_emb_i, init_std, name=f'emb_layers_._{i}')) def build(self, input_shape): for i in range(len(self.cutoffs)): d_emb_i = self.d_embed // self.div_val ** i self.emb_projs.append(self.add_weight(shape=(d_emb_i, self.d_proj), initializer=get_initializer(self.init_std), trainable=True, name=f'emb_projs_._{i}')) super().build(input_shape) def call(self, inp): if self.div_val == 1: raise NotImplementedError else: inp_flat = tf.reshape(inp, (-1,)) emb_flat = tf.zeros([shape_list(inp_flat)[0], self.d_proj]) for i in range(len(self.cutoffs)): (l_idx, r_idx) = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx) inp_i = tf.boolean_mask(inp_flat, mask_i) - l_idx emb_i = self.emb_layers[i](inp_i) emb_i = tf.einsum('id,de->ie', emb_i, self.emb_projs[i]) mask_idx = tf.where(mask_i) scatter = tf.scatter_nd(mask_idx, emb_i, shape_list(emb_flat)) emb_flat = tf.cast(emb_flat, dtype=scatter.dtype) emb_flat += scatter embed_shape = shape_list(inp) + [self.d_proj] embed = tf.reshape(emb_flat, embed_shape) embed *= self.emb_scale return embed @keras_serializable class TFTransfoXLMainLayer(keras.layers.Layer): config_class = TransfoXLConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.return_dict = config.use_return_dict self.n_token = config.vocab_size self.d_embed = config.d_embed self.d_model = config.d_model self.n_head = config.n_head self.d_head = config.d_head self.untie_r = config.untie_r self.word_emb = TFAdaptiveEmbedding(config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val, 
init_std=config.init_std, name='word_emb') self.drop = keras.layers.Dropout(config.dropout) self.n_layer = config.n_layer self.mem_len = config.mem_len self.attn_type = config.attn_type self.layers = [] if config.attn_type == 0: for i in range(config.n_layer): self.layers.append(TFRelPartialLearnableDecoderLayer(config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout, dropatt=config.dropatt, pre_lnorm=config.pre_lnorm, r_w_bias=None if self.untie_r else self.r_w_bias, r_r_bias=None if self.untie_r else self.r_r_bias, layer_norm_epsilon=config.layer_norm_epsilon, init_std=config.init_std, output_attentions=self.output_attentions, name=f'layers_._{i}')) else: raise NotImplementedError self.same_length = config.same_length self.clamp_len = config.clamp_len if self.attn_type == 0: self.pos_emb = TFPositionalEmbedding(self.d_model, name='pos_emb') else: raise NotImplementedError def build(self, input_shape): if not self.untie_r: self.r_w_bias = self.add_weight(shape=(self.n_head, self.d_head), initializer='zeros', trainable=True, name='r_w_bias') self.r_r_bias = self.add_weight(shape=(self.n_head, self.d_head), initializer='zeros', trainable=True, name='r_r_bias') super().build(input_shape) def get_input_embeddings(self): return self.word_emb def set_input_embeddings(self, value): raise NotImplementedError def backward_compatible(self): self.sample_softmax = -1 def reset_memory_length(self, mem_len): self.mem_len = mem_len def _prune_heads(self, heads): raise NotImplementedError def init_mems(self, bsz): if self.mem_len > 0: mems = [] for i in range(self.n_layer): empty = tf.zeros([self.mem_len, bsz, self.d_model]) mems.append(empty) return mems else: return None def _update_mems(self, hids, mems, mlen, qlen): if mems is None: return None assert len(hids) == len(mems), 'len(hids) != len(mems)' new_mems = [] end_idx = mlen + tf.math.maximum(0, qlen) beg_idx = tf.math.maximum(0, end_idx - tf.convert_to_tensor(self.mem_len)) for i in range(len(hids)): mems[i] = tf.cast(mems[i], dtype=hids[i].dtype) cat = tf.concat([mems[i], hids[i]], axis=0) tf.stop_gradient(cat) new_mems.append(cat[beg_idx:end_idx]) return new_mems @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, mems: List[tf.Tensor] | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: bool=False): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_ids = tf.transpose(input_ids, perm=(1, 0)) (qlen, bsz) = shape_list(input_ids) elif inputs_embeds is not None: inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2)) (qlen, bsz) = shape_list(inputs_embeds)[:2] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if mems is None: mems = self.init_mems(bsz) if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.n_layer if inputs_embeds is not None: word_emb = inputs_embeds else: word_emb = self.word_emb(input_ids) mlen = shape_list(mems[0])[0] if mems is not None else 0 klen = mlen + qlen all_ones = tf.ones([qlen, klen], dtype=tf.int32) upper_mask = 1 - tf.linalg.band_part(tf.ones([qlen, klen], dtype=tf.int32), -1, mlen) if self.same_length: mask_len = klen - self.mem_len mask_shift_len = qlen - 
tf.nn.relu(mask_len) lower_mask = tf.linalg.band_part(all_ones, -1, 0) - tf.linalg.band_part(all_ones, mask_shift_len - 1, 0) * tf.cast(mask_shift_len != 0, tf.int32) dec_attn_mask = upper_mask + lower_mask else: dec_attn_mask = upper_mask hids = [] attentions = [] if output_attentions else None if self.attn_type == 0: pos_seq = tf.range(klen - 1, -1, -1.0) if self.clamp_len > 0: pos_seq = tf.minimum(pos_seq, self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb, training=training) pos_emb = self.drop(pos_emb, training=training) for (i, layer) in enumerate(self.layers): hids.append(core_out) mems_i = None if mems is None else mems[i] layer_outputs = layer(core_out, pos_emb, dec_attn_mask, mems_i, head_mask[i], output_attentions, training=training) core_out = layer_outputs[0] if output_attentions: attentions.append(layer_outputs[1]) else: raise NotImplementedError core_out = self.drop(core_out, training=training) new_mems = self._update_mems(hids, mems, mlen, qlen) core_out = tf.transpose(core_out, perm=(1, 0, 2)) if output_hidden_states: hids = tuple((tf.transpose(t, perm=(1, 0, 2)) for t in hids)) hids = hids + (core_out,) else: hids = None if output_attentions: attentions = tuple((tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)) if not return_dict: return tuple((v for v in [core_out, new_mems, hids, attentions] if v is not None)) return TFTransfoXLModelOutput(last_hidden_state=core_out, mems=new_mems, hidden_states=hids, attentions=attentions) class TFTransfoXLPreTrainedModel(TFPreTrainedModel): config_class = TransfoXLConfig base_model_prefix = 'transformer' @dataclass class TFTransfoXLModelOutput(ModelOutput): last_hidden_state: tf.Tensor = None mems: List[tf.Tensor] = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFTransfoXLLMHeadModelOutput(ModelOutput): prediction_scores: tf.Tensor = None mems: List[tf.Tensor] = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @dataclass class TFTransfoXLSequenceClassifierOutputWithPast(ModelOutput): loss: tf.Tensor | None = None logits: tf.Tensor = None mems: List[tf.Tensor] = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None TRANSFO_XL_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Parameters:\n config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' TRANSFO_XL_INPUTS_DOCSTRING = "\n Args:\n input_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n mems (`List[tf.Tensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems\n given to this model should not be passed as `input_ids` as they have already been computed.\n head_mask (`tf.Tensor` or `Numpy array` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @add_start_docstrings('The bare Bert Model transformer outputting raw hidden-states without any specific head on top.', TRANSFO_XL_START_DOCSTRING) class TFTransfoXLModel(TFTransfoXLPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFTransfoXLMainLayer(config, name='transformer') @unpack_inputs @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTransfoXLModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, mems: List[tf.Tensor] | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: bool | None=None, output_hidden_states: bool | None=None, return_dict: bool | None=None, training: bool=False) -> TFTransfoXLModelOutput | Tuple[tf.Tensor]: outputs = self.transformer(input_ids=input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs @add_start_docstrings('\n The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive\n input embeddings)\n ', TRANSFO_XL_START_DOCSTRING) class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = TFTransfoXLMainLayer(config, name='transformer') self.sample_softmax = config.sample_softmax assert self.sample_softmax <= 0, 'Sampling from the softmax is not implemented yet. 
Please look at issue: #3310: https://github.com/huggingface/transformers/issues/3310' self.crit = TFAdaptiveSoftmaxMask(config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val, name='crit') def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError() def get_output_embeddings(self): if len(self.crit.out_layers) > 0: return self.crit.out_layers[-1] return None def reset_memory_length(self, mem_len): self.transformer.reset_memory_length(mem_len) def init_mems(self, bsz): return self.transformer.init_mems(bsz) @unpack_inputs @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTransfoXLLMHeadModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, mems: List[tf.Tensor] | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: bool | None=None, output_hidden_states: bool | None=None, return_dict: bool | None=None, labels: np.ndarray | tf.Tensor | None=None, training: bool=False) -> TFTransfoXLLMHeadModelOutput | Tuple[tf.Tensor]: if input_ids is not None: (bsz, tgt_len) = shape_list(input_ids)[:2] else: (bsz, tgt_len) = shape_list(inputs_embeds)[:2] transformer_outputs = self.transformer(input_ids, mems, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict, training=training) last_hidden = transformer_outputs[0] pred_hid = last_hidden[:, -tgt_len:] softmax_output = self.crit(pred_hid, labels, training=training) prediction_scores = softmax_output if labels is None else () if not return_dict: return (prediction_scores,) + transformer_outputs[1:] return TFTransfoXLLMHeadModelOutput(prediction_scores=prediction_scores, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs): inputs = {} if past_key_values: input_ids = tf.expand_dims(input_ids[:, -1], axis=-1) else: input_ids = input_ids return inputs def tf_to_pt_weight_rename(self, tf_weight): if self.config.tie_word_embeddings and 'crit.out_layers' in tf_weight: return (tf_weight, tf_weight.replace('crit.out_layers', 'transformer.word_emb.emb_layers')) elif self.config.tie_projs and 'crit.out_projs' in tf_weight: for (i, tie_proj) in enumerate(self.config.tie_projs): if tie_proj and self.config.div_val == 1 and (self.config.d_model != self.config.d_embed): return (tf_weight, tf_weight.replace(f'crit.out_projs.{i}', 'transformer.word_emb.emb_projs.0')) elif tie_proj and self.config.div_val != 1: return (tf_weight, tf_weight.replace('crit.out_projs', 'transformer.word_emb.emb_projs')) else: return (tf_weight,) @add_start_docstrings('\n The Transfo XL Model transformer with a sequence classification head on top (linear layer).\n\n [`TFTransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal\n models (e.g. GPT-1,GPT-2) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', TRANSFO_XL_START_DOCSTRING) class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.score = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.init_range), name='score', use_bias=False) self.transformer = TFTransfoXLMainLayer(config, name='transformer') def get_output_embeddings(self): logger.warning('Sequence classification models do not have output embeddings. `.get_output_embeddings` will be removed in transformers v4.32.') return self.transformer.word_emb @unpack_inputs @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTransfoXLSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, mems: List[tf.Tensor] | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple, TFTransfoXLSequenceClassifierOutputWithPast]: transformer_outputs = self.transformer(input_ids=input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) in_logits = None if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1) - 1 sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1) in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') loss = None if labels is not None: if input_ids is not None: (batch_size, sequence_length) = shape_list(input_ids)[:2] else: (batch_size, sequence_length) = shape_list(inputs_embeds)[:2] assert self.config.pad_token_id is not None or batch_size == 1, 'Cannot handle batch sizes > 1 if no padding token is defined.' 
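# A toy run of the padding-aware pooling used above: with a `pad_token_id`, the index of
# the first pad token in each row (minus one) selects which logit gets classified. The
# input values below are made up for illustration.
import tensorflow as tf

pad_token_id = 0
input_ids = tf.constant([[5, 6, 7, 0, 0],
                         [8, 9, 0, 0, 0]])
sequence_lengths = tf.argmax(
    tf.cast(tf.math.equal(input_ids, pad_token_id), input_ids.dtype), axis=-1) - 1
sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
print(sequence_lengths.numpy())  # [2 1] -> last non-padding position in each row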
if not tf.is_tensor(sequence_lengths): in_logits = logits[0:batch_size, sequence_lengths] loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels])) pooled_logits = in_logits if in_logits is not None else logits if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return TFTransfoXLSequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) # File: transformers-main/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py """""" import tensorflow as tf from ....modeling_tf_utils import keras from ....tf_utils import shape_list class TFAdaptiveSoftmaxMask(keras.layers.Layer): def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = cutoffs + [vocab_size] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters self.keep_order = keep_order self.out_layers = [] self.out_projs = [] def build(self, input_shape): if self.n_clusters > 0: self.cluster_weight = self.add_weight(shape=(self.n_clusters, self.d_embed), initializer='zeros', trainable=True, name='cluster_weight') self.cluster_bias = self.add_weight(shape=(self.n_clusters,), initializer='zeros', trainable=True, name='cluster_bias') if self.div_val == 1: for i in range(len(self.cutoffs)): if self.d_proj != self.d_embed: weight = self.add_weight(shape=(self.d_embed, self.d_proj), initializer='zeros', trainable=True, name=f'out_projs_._{i}') self.out_projs.append(weight) else: self.out_projs.append(None) weight = self.add_weight(shape=(self.vocab_size, self.d_embed), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._weight') bias = self.add_weight(shape=(self.vocab_size,), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._bias') self.out_layers.append((weight, bias)) else: for i in range(len(self.cutoffs)): (l_idx, r_idx) = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) d_emb_i = self.d_embed // self.div_val ** i weight = self.add_weight(shape=(d_emb_i, self.d_proj), initializer='zeros', trainable=True, name=f'out_projs_._{i}') self.out_projs.append(weight) weight = self.add_weight(shape=(r_idx - l_idx, d_emb_i), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._weight') bias = self.add_weight(shape=(r_idx - l_idx,), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._bias') self.out_layers.append((weight, bias)) super().build(input_shape) @staticmethod def _logit(x, W, b, proj=None): y = x if proj is not None: y = tf.einsum('ibd,ed->ibe', y, proj) return tf.einsum('ibd,nd->ibn', y, W) + b @staticmethod def _gather_logprob(logprob, target): lp_size = shape_list(logprob) r = tf.range(lp_size[0], dtype=target.dtype) idx = tf.stack([r, target], 1) return tf.gather_nd(logprob, idx) def call(self, hidden, target, return_mean=True, training=False): head_logprob = 0 if self.n_clusters == 0: output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0]) if target is not None: loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output) out = tf.nn.log_softmax(output, axis=-1) else: 
hidden_sizes = shape_list(hidden) out = [] loss = tf.zeros(hidden_sizes[:2]) for i in range(len(self.cutoffs)): (l_idx, r_idx) = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) if target is not None: mask = (target >= l_idx) & (target < r_idx) mask_idx = tf.where(mask) cur_target = tf.boolean_mask(target, mask) - l_idx if self.div_val == 1: cur_W = self.out_layers[0][0][l_idx:r_idx] cur_b = self.out_layers[0][1][l_idx:r_idx] else: cur_W = self.out_layers[i][0] cur_b = self.out_layers[i][1] if i == 0: cur_W = tf.concat([cur_W, self.cluster_weight], 0) cur_b = tf.concat([cur_b, self.cluster_bias], 0) head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0]) head_logprob = tf.nn.log_softmax(head_logit) out.append(head_logprob[..., :self.cutoffs[0]]) if target is not None: cur_head_logprob = tf.boolean_mask(head_logprob, mask) cur_logprob = self._gather_logprob(cur_head_logprob, cur_target) else: tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i]) tail_logprob = tf.nn.log_softmax(tail_logit) cluster_prob_idx = self.cutoffs[0] + i - 1 logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(logprob_i) if target is not None: cur_head_logprob = tf.boolean_mask(head_logprob, mask) cur_tail_logprob = tf.boolean_mask(tail_logprob, mask) cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss)) out = tf.concat(out, axis=-1) if target is not None: if return_mean: loss = tf.reduce_mean(loss) self.add_loss(loss) self.add_metric(loss, name=self.name, aggregation='mean' if return_mean else '') return out # File: transformers-main/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py """""" import warnings from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....modeling_utils import PreTrainedModel from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_transfo_xl import TransfoXLConfig from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'transfo-xl/transfo-xl-wt103' _CONFIG_FOR_DOC = 'TransfoXLConfig' def build_tf_to_pytorch_map(model, config): tf_to_pt_map = {} if hasattr(model, 'transformer'): tf_to_pt_map.update({'transformer/adaptive_softmax/cutoff_0/cluster_W': model.crit.cluster_weight, 'transformer/adaptive_softmax/cutoff_0/cluster_b': model.crit.cluster_bias}) for (i, (out_l, proj_l, tie_proj)) in enumerate(zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)): layer_str = f'transformer/adaptive_softmax/cutoff_{i}/' if config.tie_word_embeddings: tf_to_pt_map.update({layer_str + 'b': out_l.bias}) else: raise NotImplementedError tf_to_pt_map.update({layer_str + 'lookup_table': out_l.weight, layer_str + 'b': out_l.bias}) if not tie_proj: tf_to_pt_map.update({layer_str + 'proj': proj_l}) model = model.transformer for (i, (embed_l, proj_l)) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)): layer_str = f'transformer/adaptive_embed/cutoff_{i}/' tf_to_pt_map.update({layer_str + 'lookup_table': embed_l.weight, layer_str + 'proj_W': proj_l}) for (i, b) in enumerate(model.layers): layer_str = f'transformer/layer_{i}/' tf_to_pt_map.update({layer_str + 
'rel_attn/LayerNorm/gamma': b.dec_attn.layer_norm.weight, layer_str + 'rel_attn/LayerNorm/beta': b.dec_attn.layer_norm.bias, layer_str + 'rel_attn/o/kernel': b.dec_attn.o_net.weight, layer_str + 'rel_attn/qkv/kernel': b.dec_attn.qkv_net.weight, layer_str + 'rel_attn/r/kernel': b.dec_attn.r_net.weight, layer_str + 'ff/LayerNorm/gamma': b.pos_ff.layer_norm.weight, layer_str + 'ff/LayerNorm/beta': b.pos_ff.layer_norm.bias, layer_str + 'ff/layer_1/kernel': b.pos_ff.CoreNet[0].weight, layer_str + 'ff/layer_1/bias': b.pos_ff.CoreNet[0].bias, layer_str + 'ff/layer_2/kernel': b.pos_ff.CoreNet[3].weight, layer_str + 'ff/layer_2/bias': b.pos_ff.CoreNet[3].bias}) if config.untie_r: r_r_list = [] r_w_list = [] for b in model.layers: r_r_list.append(b.dec_attn.r_r_bias) r_w_list.append(b.dec_attn.r_w_bias) else: r_r_list = [model.r_r_bias] r_w_list = [model.r_w_bias] tf_to_pt_map.update({'transformer/r_r_bias': r_r_list, 'transformer/r_w_bias': r_w_list}) return tf_to_pt_map def load_tf_weights_in_transfo_xl(model, config, tf_path): try: import numpy as np import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_to_pt_map = build_tf_to_pytorch_map(model, config) init_vars = tf.train.list_variables(tf_path) tf_weights = {} for (name, shape) in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}') array = tf.train.load_variable(tf_path, name) tf_weights[name] = array for (name, pointer) in tf_to_pt_map.items(): assert name in tf_weights array = tf_weights[name] if 'kernel' in name or 'proj' in name: array = np.transpose(array) if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1: assert len(pointer) == array.shape[0] for (i, p_i) in enumerate(pointer): arr_i = array[i, ...] 
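# Why the loader above transposes every 'kernel'/'proj' array: TF Dense kernels are stored
# as (in_features, out_features), while torch.nn.Linear.weight is (out_features, in_features).
# A minimal sketch with random data:
import numpy as np
import torch

tf_kernel = np.random.randn(16, 32).astype(np.float32)   # (in, out), as TF stores it
linear = torch.nn.Linear(16, 32, bias=False)              # weight shape (32, 16)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(np.transpose(tf_kernel)))
print(linear.weight.shape)  # torch.Size([32, 16])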
try: assert p_i.shape == arr_i.shape except AssertionError as e: e.args += (p_i.shape, arr_i.shape) raise logger.info(f'Initialize PyTorch weight {name} for layer {i}') p_i.data = torch.from_numpy(arr_i) else: try: assert pointer.shape == array.shape, f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f'Initialize PyTorch weight {name}') pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + '/Adam', None) tf_weights.pop(name + '/Adam_1', None) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}") return model class PositionalEmbedding(nn.Module): def __init__(self, demb): super().__init__() self.demb = demb inv_freq = 1 / 10000 ** (torch.arange(0.0, demb, 2.0) / demb) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz=None): sinusoid_inp = torch.outer(pos_seq, self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) if bsz is not None: return pos_emb[:, None, :].expand(-1, bsz, -1) else: return pos_emb[:, None, :] class PositionwiseFF(nn.Module): def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-05): super().__init__() self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.CoreNet = nn.Sequential(nn.Linear(d_model, d_inner), nn.ReLU(inplace=True), nn.Dropout(dropout), nn.Linear(d_inner, d_model), nn.Dropout(dropout)) self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon) self.pre_lnorm = pre_lnorm def forward(self, inp): if self.pre_lnorm: core_out = self.CoreNet(self.layer_norm(inp)) output = core_out + inp else: core_out = self.CoreNet(inp) output = self.layer_norm(inp + core_out) return output class RelPartialLearnableMultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False, r_r_bias=None, r_w_bias=None, layer_norm_epsilon=1e-05): super().__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon) self.scale = 1 / d_head ** 0.5 self.pre_lnorm = pre_lnorm if r_r_bias is None or r_w_bias is None: self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) else: self.r_r_bias = r_r_bias self.r_w_bias = r_w_bias self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False) def _rel_shift(self, x): zero_pad_shape = (x.size(0), 1) + x.size()[2:] zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=1) x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:] x_padded = x_padded.view(*x_padded_shape) x = x_padded[1:].view_as(x) return x def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False): (qlen, rlen, bsz) = (w.size(0), r.size(0), w.size(1)) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) r_head_k = self.r_net(r) (w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: 
w_heads = self.qkv_net(w) r_head_k = self.r_net(r) (w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) rw_head_q = w_head_q + self.r_w_bias AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) rr_head_q = w_head_q + self.r_r_bias BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) BD = self._rel_shift(BD) attn_score = AC + BD attn_score.mul_(self.scale) mask_value = torch.finfo(attn_score.dtype).min if attn_mask is not None and torch.sum(attn_mask).item(): attn_mask = attn_mask == 1 if attn_mask.dim() == 2: attn_score = attn_score.float().masked_fill(attn_mask[None, :, :, None], mask_value).type_as(attn_score) elif attn_mask.dim() == 3: attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], mask_value).type_as(attn_score) attn_prob = nn.functional.softmax(attn_score, dim=1) attn_prob = self.dropatt(attn_prob) if head_mask is not None: attn_prob = attn_prob * head_mask attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v)) attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: outputs = [w + attn_out] else: outputs = [self.layer_norm(w + attn_out)] if output_attentions: outputs.append(attn_prob) return outputs class RelPartialLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-05, **kwargs): super().__init__() self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm'), layer_norm_epsilon=layer_norm_epsilon) def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False): attn_outputs = self.dec_attn(dec_inp, r, attn_mask=dec_attn_mask, mems=mems, head_mask=head_mask, output_attentions=output_attentions) ff_output = self.pos_ff(attn_outputs[0]) outputs = [ff_output] + attn_outputs[1:] return outputs class AdaptiveEmbedding(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False): super().__init__() self.n_token = n_token self.d_embed = d_embed self.cutoffs = cutoffs + [n_token] self.div_val = div_val self.d_proj = d_proj self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = nn.ParameterList() if div_val == 1: self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)) if d_proj != d_embed: self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) else: for i in range(len(self.cutoffs)): (l_idx, r_idx) = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) d_emb_i = d_embed // div_val ** i self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i)) self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) def forward(self, inp): if self.div_val == 1: embed = self.emb_layers[0](inp) if self.d_proj != self.d_embed: embed = nn.functional.linear(embed, self.emb_projs[0]) else: param = next(self.parameters()) inp_flat = inp.view(-1) emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) for i in 
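# Standalone demo of the `_rel_shift` trick defined above (toy 3x3 scores, no batch or head
# dimensions): before the shift, column j of the BD term indexes a relative position;
# afterwards, entry (i, j) is aligned with absolute key position j. The upper-triangular
# leftovers are discarded by the causal attention mask.
import torch

def rel_shift(x):
    zero_pad = torch.zeros((x.size(0), 1) + x.size()[2:], dtype=x.dtype)
    x_padded = torch.cat([zero_pad, x], dim=1)
    x_padded = x_padded.view((x.size(1) + 1, x.size(0)) + x.size()[2:])
    return x_padded[1:].view_as(x)

scores = torch.arange(3.0).repeat(3, 1)  # scores[i, j] == j, the relative-position index
print(rel_shift(scores))
# tensor([[2., 0., 0.],
#         [1., 2., 0.],
#         [0., 1., 2.]])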
range(len(self.cutoffs)): (l_idx, r_idx) = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue inp_i = inp_flat.index_select(0, indices_i) - l_idx emb_i = self.emb_layers[i](inp_i) emb_i = nn.functional.linear(emb_i, self.emb_projs[i]) emb_flat.index_copy_(0, indices_i, emb_i) embed_shape = inp.size() + (self.d_proj,) embed = emb_flat.view(embed_shape) embed.mul_(self.emb_scale) return embed class TransfoXLPreTrainedModel(PreTrainedModel): config_class = TransfoXLConfig load_tf_weights = load_tf_weights_in_transfo_xl base_model_prefix = 'transformer' def _init_weight(self, weight): if self.config.init == 'uniform': nn.init.uniform_(weight, -self.config.init_range, self.config.init_range) elif self.config.init == 'normal': nn.init.normal_(weight, 0.0, self.config.init_std) def _init_bias(self, bias): nn.init.constant_(bias, 0.0) def _init_weights(self, m): classname = m.__class__.__name__ if classname.find('Linear') != -1: if hasattr(m, 'weight') and m.weight is not None: self._init_weight(m.weight) if hasattr(m, 'bias') and m.bias is not None: self._init_bias(m.bias) elif classname.find('AdaptiveEmbedding') != -1: if hasattr(m, 'emb_projs'): for i in range(len(m.emb_projs)): if m.emb_projs[i] is not None: nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std) elif classname.find('Embedding') != -1: if hasattr(m, 'weight'): self._init_weight(m.weight) elif classname.find('ProjectedAdaptiveLogSoftmax') != -1: if hasattr(m, 'cluster_weight') and m.cluster_weight is not None: self._init_weight(m.cluster_weight) if hasattr(m, 'cluster_bias') and m.cluster_bias is not None: self._init_bias(m.cluster_bias) if hasattr(m, 'out_projs'): for i in range(len(m.out_projs)): if m.out_projs[i] is not None: nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std) elif classname.find('LayerNorm') != -1: if hasattr(m, 'weight'): nn.init.normal_(m.weight, 1.0, self.config.init_std) if hasattr(m, 'bias') and m.bias is not None: self._init_bias(m.bias) else: if hasattr(m, 'r_emb'): self._init_weight(m.r_emb) if hasattr(m, 'r_w_bias'): self._init_weight(m.r_w_bias) if hasattr(m, 'r_r_bias'): self._init_weight(m.r_r_bias) if hasattr(m, 'r_bias'): self._init_bias(m.r_bias) def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, layer: Optional[int]=-1): base_model = getattr(self, self.base_model_prefix, self) if new_num_tokens is None: return self.get_input_embeddings() (new_num_tokens_layer, layer) = self._get_new_num_tokens_layer(new_num_tokens, layer) assert new_num_tokens_layer > 0, 'The size of the new embedding layer cannot be 0 or less' model_embeds = base_model._resize_token_embeddings(new_num_tokens_layer, layer) self.config.vocab_size = new_num_tokens base_model.vocab_size = new_num_tokens base_model.n_token = new_num_tokens new_embedding_shapes = self._get_embedding_shapes() self._resize_cutoffs(new_num_tokens, new_num_tokens_layer, new_embedding_shapes, layer) self.tie_weights() return model_embeds def _get_new_num_tokens_layer(self, new_num_tokens, layer): embeddings = self.get_input_embeddings() if layer == -1: layer = len(embeddings.emb_layers) - 1 assert 0 <= layer <= len(embeddings.emb_layers) - 1 new_num_tokens_layer = new_num_tokens - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]]) - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1:]]) return (new_num_tokens_layer, layer) def _get_embedding_shapes(self): 
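# Toy illustration of the adaptive-embedding/adaptive-softmax geometry used in this file:
# cluster i covers token ids [cutoff_ends[i], cutoff_ends[i+1]) and uses an embedding of
# width d_embed // div_val**i (values below mirror the TransfoXLConfig defaults shown earlier).
vocab_size, d_embed, div_val = 267735, 1024, 4
cutoffs = [20000, 40000, 200000] + [vocab_size]
cutoff_ends = [0] + cutoffs
for i in range(len(cutoffs)):
    l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
    print(f"cluster {i}: {r_idx - l_idx} tokens, embedding width {d_embed // div_val ** i}")
# cluster 0: 20000 tokens, embedding width 1024
# cluster 1: 20000 tokens, embedding width 256
# cluster 2: 160000 tokens, embedding width 64
# cluster 3: 67735 tokens, embedding width 16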
embeddings = self.get_input_embeddings() return [emb.weight.shape[0] for emb in embeddings.emb_layers] def _resize_token_embeddings(self, new_num_tokens, layer=-1): embeddings = self.get_input_embeddings() if new_num_tokens is None: return embeddings new_embeddings_layer = self._get_resized_embeddings(embeddings.emb_layers[layer], new_num_tokens) embeddings.emb_layers[layer] = new_embeddings_layer self.set_input_embeddings(embeddings) return self.get_input_embeddings() def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer): embeddings = self.get_input_embeddings() for i in range(layer, len(embeddings.cutoffs)): embeddings.cutoffs[i] = sum(new_embedding_shapes[:i + 1]) embeddings.cutoff_ends = [0] + embeddings.cutoffs embeddings.n_token = new_num_tokens self.config.cutoffs = embeddings.cutoffs[:-1] return embeddings.cutoffs @dataclass class TransfoXLModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor mems: List[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class TransfoXLSequenceClassifierOutputWithPast(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: List[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class TransfoXLLMHeadModelOutput(ModelOutput): losses: Optional[torch.FloatTensor] = None prediction_scores: torch.FloatTensor = None mems: List[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None loss: Optional[torch.FloatTensor] = None @property def logits(self): return self.prediction_scores TRANSFO_XL_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' TRANSFO_XL_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n mems (`List[torch.FloatTensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems\n given to this model should not be passed as `input_ids` as they have already been computed.\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare Bert Model transformer outputting raw hidden-states without any specific head on top.', TRANSFO_XL_START_DOCSTRING) class TransfoXLModel(TransfoXLPreTrainedModel): def __init__(self, config): super().__init__(config) self.n_token = config.vocab_size self.d_embed = config.d_embed self.d_model = config.d_model self.n_head = config.n_head self.d_head = config.d_head self.word_emb = AdaptiveEmbedding(config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val) self.drop = nn.Dropout(config.dropout) self.n_layer = config.n_layer self.mem_len = config.mem_len self.attn_type = config.attn_type if not config.untie_r: self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.layers = nn.ModuleList() if config.attn_type == 0: for i in range(config.n_layer): self.layers.append(RelPartialLearnableDecoderLayer(config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout, dropatt=config.dropatt, pre_lnorm=config.pre_lnorm, r_w_bias=None if config.untie_r else self.r_w_bias, r_r_bias=None if config.untie_r else self.r_r_bias, layer_norm_epsilon=config.layer_norm_epsilon)) else: raise NotImplementedError self.same_length = config.same_length self.clamp_len = config.clamp_len if self.attn_type == 0: self.pos_emb = PositionalEmbedding(self.d_model) else: raise NotImplementedError self.post_init() def get_input_embeddings(self): return self.word_emb def set_input_embeddings(self, new_embeddings): self.word_emb = new_embeddings def backward_compatible(self): self.sample_softmax = -1 def reset_memory_length(self, mem_len): self.mem_len = mem_len def _prune_heads(self, heads): logger.info('Head pruning is not implemented for Transformer-XL model') pass def init_mems(self, bsz): if self.mem_len > 0: mems = [] param = next(self.parameters()) for i in range(self.n_layer): empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device) mems.append(empty) return mems else: return None def _update_mems(self, hids, mems, mlen, qlen): if mems is None: return None assert len(hids) == len(mems), 'len(hids) != len(mems)' with torch.no_grad(): new_mems = [] end_idx = mlen + max(0, qlen) beg_idx = max(0, end_idx - self.mem_len) for i in range(len(hids)): cat = torch.cat([mems[i], hids[i]], dim=0) new_mems.append(cat[beg_idx:end_idx].detach()) return new_mems @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, 
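# --- Illustrative sketch (not library source): the rolling memory maintained by _update_mems above.
# New hidden states are concatenated onto the cached ones and only the most recent `mem_len`
# positions are kept, detached so no gradient flows into the cache. Sizes are made up for the demo.
import torch

mem_len, qlen, bsz, d_model = 4, 3, 2, 8
mems = torch.zeros(mem_len, bsz, d_model)   # as produced by init_mems for one layer
hids = torch.randn(qlen, bsz, d_model)      # hidden states of the newly processed segment

mlen = mems.size(0)
end_idx = mlen + qlen
beg_idx = max(0, end_idx - mem_len)
new_mems = torch.cat([mems, hids], dim=0)[beg_idx:end_idx].detach()
print(new_mems.shape)                       # torch.Size([4, 2, 8]) -> still mem_len positions long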
output_type=TransfoXLModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, mems: Optional[List[torch.FloatTensor]]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TransfoXLModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_ids = input_ids.transpose(0, 1).contiguous() (qlen, bsz) = input_ids.size() elif inputs_embeds is not None: inputs_embeds = inputs_embeds.transpose(0, 1).contiguous() (qlen, bsz) = (inputs_embeds.shape[0], inputs_embeds.shape[1]) else: raise ValueError('You have to specify either input_ids or inputs_embeds') if mems is None: mems = self.init_mems(bsz) if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0) head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1) head_mask = head_mask.to(dtype=next(self.parameters()).dtype) else: head_mask = [None] * self.n_layer if inputs_embeds is not None: word_emb = inputs_embeds else: word_emb = self.word_emb(input_ids) mlen = mems[0].size(0) if mems is not None else 0 klen = mlen + qlen if self.same_length: all_ones = word_emb.new_ones((qlen, klen), dtype=torch.bool) mask_len = klen - self.mem_len if mask_len > 0: mask_shift_len = qlen - mask_len else: mask_shift_len = qlen dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None] else: dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.bool), diagonal=1 + mlen)[:, :, None] hids = [] attentions = [] if output_attentions else None if self.attn_type == 0: pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=torch.int64).type_as(dtype=word_emb.dtype) if self.clamp_len > 0: pos_seq.clamp_(max=self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb) pos_emb = self.drop(pos_emb) for (i, layer) in enumerate(self.layers): hids.append(core_out) mems_i = None if mems is None else mems[i] layer_outputs = layer(core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i, head_mask=head_mask[i], output_attentions=output_attentions) core_out = layer_outputs[0] if output_attentions: attentions.append(layer_outputs[1]) else: raise NotImplementedError core_out = self.drop(core_out) new_mems = self._update_mems(hids, mems, mlen, qlen) if output_hidden_states: hids.append(core_out) hids = tuple((t.transpose(0, 1).contiguous() for t in hids)) else: hids = None if output_attentions: attentions = tuple((t.permute(2, 3, 0, 1).contiguous() for t in attentions)) core_out = core_out.transpose(0, 1).contiguous() if not return_dict: return tuple((v for v in [core_out, new_mems, hids, attentions] if v is not None)) return TransfoXLModelOutput(last_hidden_state=core_out, mems=new_mems, hidden_states=hids, attentions=attentions) @add_start_docstrings('\n The Transformer-XL Model 
with a language modeling head on top (adaptive softmax with weights tied to the adaptive\n input embeddings)\n ', TRANSFO_XL_START_DOCSTRING) class TransfoXLLMHeadModel(TransfoXLPreTrainedModel): _tied_weights_keys = ['crit\\.out_projs\\.\\d+', 'crit\\.out_layers\\.\\d+\\.weight'] def __init__(self, config): super().__init__(config) self.transformer = TransfoXLModel(config) self.sample_softmax = config.sample_softmax self.trainer_compatible = getattr(config, 'trainer_compatible', False) if not self.trainer_compatible: warnings.warn('The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order to use that updated output, please specify `trainer_compatible=True` as your configuration attribute.', DeprecationWarning) assert self.sample_softmax <= 0, 'Sampling from the softmax is not implemented yet. Please look at issue: #3310: https://github.com/huggingface/transformers/issues/3310' self.crit = ProjectedAdaptiveLogSoftmax(config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val) self.post_init() def tie_weights(self): if self.config.tie_word_embeddings: for i in range(len(self.crit.out_layers)): self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i]) if self.config.tie_projs: for (i, tie_proj) in enumerate(self.config.tie_projs): if tie_proj and self.config.div_val == 1 and (self.config.d_model != self.config.d_embed): if self.config.torchscript: self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone()) else: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0] elif tie_proj and self.config.div_val != 1: if self.config.torchscript: self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone()) else: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i] def reset_memory_length(self, mem_len): self.transformer.reset_memory_length(mem_len) def init_mems(self, bsz): return self.transformer.init_mems(bsz) @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLLMHeadModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, mems: Optional[List[torch.FloatTensor]]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TransfoXLLMHeadModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None: (bsz, tgt_len) = (input_ids.size(0), input_ids.size(1)) elif inputs_embeds is not None: (bsz, tgt_len) = (inputs_embeds.size(0), inputs_embeds.size(1)) else: raise ValueError('You have to specify either input_ids or inputs_embeds') transformer_outputs = self.transformer(input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden = transformer_outputs[0] pred_hid = last_hidden[:, -tgt_len:] if labels is not None: miss_valid_label = labels[0, 1:].sum() == (labels.size(1) - 1) * -100 if miss_valid_label: labels[0, 1] = self.config.eos_token_id softmax_output = self.crit(pred_hid, labels) prediction_scores = softmax_output.view(bsz, tgt_len, -1) if labels is None else () if labels is not None: 
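# --- Illustrative usage sketch (not library source): run TransfoXLLMHeadModel from a small random
# config, compute the adaptive-softmax LM loss from `labels`, and reuse `mems` as extra context for
# a second segment. Assumes a transformers build that still ships the deprecated transfo_xl module;
# all sizes and token ids below are made up for the demo.
import torch
from transformers.models.deprecated.transfo_xl.configuration_transfo_xl import TransfoXLConfig
from transformers.models.deprecated.transfo_xl.modeling_transfo_xl import TransfoXLLMHeadModel

config = TransfoXLConfig(
    vocab_size=1000, cutoffs=[200, 600], div_val=2,
    d_model=64, d_embed=64, n_head=2, d_head=32, d_inner=128,
    n_layer=2, mem_len=8,
)
model = TransfoXLLMHeadModel(config).eval()

input_ids = torch.tensor([
    [5, 17, 42, 250, 260, 300, 650, 700, 800, 60],
    [10, 20, 450, 500, 777, 888, 999, 123, 321, 210],
])
with torch.no_grad():
    first = model(input_ids, labels=input_ids)    # per-token NLL averaged into `loss`
    second = model(input_ids, mems=first.mems)    # cached hidden states extend the context window
print(first.loss, len(second.mems), second.mems[0].shape)   # one (mem_len, batch, d_model) tensor per layer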
losses = softmax_output.view(bsz, tgt_len - 1) loss = losses[losses != 0].mean() else: (losses, loss) = (None, None) if not return_dict: if self.trainer_compatible: output = (prediction_scores, losses) if losses is not None else (prediction_scores,) output += transformer_outputs[1:] return (loss,) + output if loss is not None else output else: output = (prediction_scores, *transformer_outputs[1:]) output = (losses,) + output if losses is not None else output return output + (loss,) if loss is not None else output return TransfoXLLMHeadModelOutput(loss=loss, prediction_scores=prediction_scores, losses=losses, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def get_output_embeddings(self): if self.sample_softmax > 0: return self.out_layer else: return self.crit.out_layers[-1] def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs): inputs = {} if past_key_values: inputs['mems'] = past_key_values inputs['input_ids'] = input_ids[:, -1].unsqueeze(-1) else: inputs['input_ids'] = input_ids return inputs def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer): new_cutoffs = super()._resize_cutoffs(new_num_tokens, new_emb_size, new_embedding_shapes, layer) self.crit.cutoffs = new_cutoffs self.crit.cutoff_ends = [0] + new_cutoffs self.crit.n_token = new_num_tokens @staticmethod def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]: return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems] @add_start_docstrings('\n The Transformer-XL Model transformer with a sequence classification head on top (linear layer).\n\n [`TransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal\n models (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', TRANSFO_XL_START_DOCSTRING) class TransfoXLForSequenceClassification(TransfoXLPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = TransfoXLModel(config) self.score = nn.Linear(config.d_embed, self.num_labels, bias=False) self.post_init() @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, mems: Optional[List[torch.FloatTensor]]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TransfoXLSequenceClassifierOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: (batch_size, sequence_length) = input_ids.shape[:2] else: (batch_size, sequence_length) = inputs_embeds.shape[:2] assert self.config.pad_token_id is not None or batch_size == 1, 'Cannot handle batch sizes > 1 if no padding token is defined.' if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[range(batch_size), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return TransfoXLSequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) # File: transformers-main/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py """""" import torch from torch import nn class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False): super().__init__() self.n_token = n_token self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = cutoffs + [n_token] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) self.out_layers = nn.ModuleList() self.out_projs = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs)): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed))) else: self.out_projs.append(None) self.out_layers.append(nn.Linear(d_embed, n_token)) else: for i in range(len(self.cutoffs)): (l_idx, r_idx) = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) d_emb_i = d_embed // div_val ** i self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i))) self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx)) self.keep_order = keep_order def _compute_logit(self, hidden, weight, bias, proj): if proj is None: logit = nn.functional.linear(hidden, weight, bias=bias) else: proj_hid = nn.functional.linear(hidden, proj.t().contiguous()) logit = nn.functional.linear(proj_hid, weight, bias=bias) return logit def forward(self, hidden, labels=None, keep_order=False): if labels is not None: hidden = hidden[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() hidden = hidden.view(-1, hidden.size(-1)) labels = labels.view(-1) if hidden.size(0) != labels.size(0): raise RuntimeError('Input and labels should have the same size in the batch dimension.') else: hidden = hidden.view(-1, hidden.size(-1)) if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]) if labels is not None: mask = labels != -100 out = 
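# --- Illustrative sketch (not library source): how TransfoXLForSequenceClassification picks the
# logit to classify on. With a pad_token_id, the index of the last non-pad token per row is found
# via the argmax-of-equality trick used above; a row without padding wraps around to the last
# position. Values below are made up for the demo.
import torch

pad_token_id = 0
input_ids = torch.tensor([
    [11, 12, 13,  0,  0],   # two trailing pads -> last real token at index 2
    [21, 22, 23, 24, 25],   # no pads -> argmax is 0, so -1 % seq_len wraps to the last index
])
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)                                          # tensor([2, 4])

logits = torch.randn(2, 5, 3)                                    # (batch, seq, num_labels)
pooled_logits = logits[range(input_ids.shape[0]), sequence_lengths]
print(pooled_logits.shape)                                       # torch.Size([2, 3])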
torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device) out[mask] = -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1) else: out = nn.functional.log_softmax(logit, dim=-1) else: (weights, biases) = ([], []) for i in range(len(self.cutoffs)): if self.div_val == 1: (l_idx, r_idx) = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i = self.out_layers[i].bias if i == 0: weight_i = torch.cat([weight_i, self.cluster_weight], dim=0) bias_i = torch.cat([bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) (head_weight, head_bias, head_proj) = (weights[0], biases[0], self.out_projs[0]) head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob = nn.functional.log_softmax(head_logit, dim=1) if labels is None: out = hidden.new_empty((head_logit.size(0), self.n_token)) else: out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device) offset = 0 cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): (l_idx, r_idx) = (cutoff_values[i], cutoff_values[i + 1]) if labels is not None: mask_i = (labels >= l_idx) & (labels < r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue target_i = labels.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) hidden_i = hidden.index_select(0, indices_i) else: hidden_i = hidden if i == 0: if labels is not None: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]] else: (weight_i, bias_i, proj_i) = (weights[i], biases[i], self.out_projs[i]) tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1) cluster_prob_idx = self.cutoffs[0] + i - 1 if labels is not None: logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i out[:, l_idx:r_idx] = logprob_i if labels is not None: if hasattr(self, 'keep_order') and self.keep_order or keep_order: out.index_copy_(0, indices_i, -logprob_i) else: out[offset:offset + logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return out def log_prob(self, hidden): if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]) return nn.functional.log_softmax(logit, dim=-1) else: (weights, biases) = ([], []) for i in range(len(self.cutoffs)): if self.div_val == 1: (l_idx, r_idx) = (self.cutoff_ends[i], self.cutoff_ends[i + 1]) weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i = self.out_layers[i].bias if i == 0: weight_i = torch.cat([weight_i, self.cluster_weight], dim=0) bias_i = torch.cat([bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) (head_weight, head_bias, head_proj) = (weights[0], biases[0], self.out_projs[0]) head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) out = hidden.new_empty((head_logit.size(0), self.n_token)) head_logprob = nn.functional.log_softmax(head_logit, dim=1) cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): (start_idx, stop_idx) = 
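# --- Illustrative sketch (not library source): ProjectedAdaptiveLogSoftmax returns per-token
# negative log-likelihoods when `labels` are given (note the internal shift: position t is scored
# against label t+1). Using div_val=1 with d_proj == d_embed keeps every parameter initialized, so
# the numbers are well defined. Assumes the deprecated module path; sizes and ids are made up.
import torch
from transformers.models.deprecated.transfo_xl.modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax

crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[200, 600], div_val=1)

hidden = torch.randn(2, 6, 32)                    # (batch, seq, d_proj)
labels = torch.tensor([
    [3, 15, 240, 260, 700, 750],
    [8, 22, 300, 310, 810, 900],
])
nll = crit(hidden, labels)                        # shape: (batch * (seq - 1),)
print(nll.shape, nll.mean())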
(cutoff_values[i], cutoff_values[i + 1]) if i == 0: out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]] else: (weight_i, bias_i, proj_i) = (weights[i], biases[i], self.out_projs[i]) tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i) tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob[:, -i] + tail_logprob_i out[:, start_idx, stop_idx] = logprob_i return out # File: transformers-main/src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py """""" import glob import os import pickle import re from collections import Counter, OrderedDict from typing import List, Optional, Tuple import numpy as np from ....tokenization_utils import PreTrainedTokenizer from ....utils import cached_file, is_sacremoses_available, is_torch_available, logging, requires_backends, strtobool, torch_only_method if is_sacremoses_available(): import sacremoses as sm if is_torch_available(): import torch logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'pretrained_vocab_file': 'vocab.pkl', 'pretrained_vocab_file_torch': 'vocab.bin', 'vocab_file': 'vocab.txt'} PRETRAINED_CORPUS_ARCHIVE_MAP = {'transfo-xl/transfo-xl-wt103': 'https://huggingface.co/transfo-xl/transfo-xl-wt103/resolve/main/corpus.bin'} CORPUS_NAME = 'corpus.bin' MATCH_NUMBERS = ('(?<=\\d)[,.](?=\\d)', ' @\\g<0>@ ') DETOKENIZE_NUMBERS = [(' @\\,@ ', ','), (' @\\.@ ', '.')] def tokenize_numbers(text_array: List[str]) -> List[str]: tokenized = [] for i in range(len(text_array)): (reg, sub) = MATCH_NUMBERS replaced = re.sub(reg, sub, text_array[i]).split() tokenized.extend(replaced) return tokenized def detokenize_numbers(text: str) -> str: for (reg, sub) in DETOKENIZE_NUMBERS: text = re.sub(reg, sub, text) return text class TransfoXLTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids'] def __init__(self, special=None, min_freq=0, max_size=None, lower_case=False, delimiter=None, vocab_file=None, pretrained_vocab_file: str=None, never_split=None, unk_token='', eos_token='', additional_special_tokens=[''], language='en', **kwargs): logger.error("`TransfoXL` was deprecated due to security issues linked to `pickle.load` in `TransfoXLTokenizer`. See more details on this model's documentation page: `https://github.com/huggingface/transformers/blob/main/docs/source/en/model_doc/transfo-xl.md`.") requires_backends(self, 'sacremoses') if special is None: special = [] self.counter = Counter() self.special = special self.min_freq = min_freq self.max_size = max_size self.lower_case = lower_case self.delimiter = delimiter self.vocab_file = vocab_file self.punctuation_symbols = '!"#$%&()*+,-./\\:;<=>?@[\\]^_`{|}~' self.punction_without_space_before_pattern = re.compile(f'[^\\s][{self.punctuation_symbols}]') self.punctuation_with_space_around_pattern = self._compile_space_around_punctuation_pattern() self.language = language self.moses_punct_normalizer = sm.MosesPunctNormalizer(language) self.moses_tokenizer = sm.MosesTokenizer(language) self.moses_detokenizer = sm.MosesDetokenizer(language) self.idx2sym = [] self.sym2idx = OrderedDict() try: vocab_dict = None if pretrained_vocab_file is not None: if not strtobool(os.environ.get('TRUST_REMOTE_CODE', 'False')): raise ValueError("This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially malicious. It's recommended to never unpickle data that could have come from an untrusted source, or that could have been tampered with. 
If you already verified the pickle data and decided to use it, you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it.") with open(pretrained_vocab_file, 'rb') as f: vocab_dict = pickle.load(f) if isinstance(vocab_dict, int): if not is_torch_available(): raise ImportError('Not trying to load dict with PyTorch as you need to install pytorch to load from a PyTorch pretrained vocabulary, or activate it with environment variables USE_TORCH=1 and USE_TF=0.') vocab_dict = torch.load(pretrained_vocab_file) if vocab_dict is not None: for (key, value) in vocab_dict.items(): if key not in self.__dict__ or key in ['sym2idx', 'idx2sym']: self.__dict__[key] = value elif vocab_file is not None: self.build_vocab() except Exception as e: raise ValueError(f'Unable to parse file {pretrained_vocab_file}. Unknown format. If you tried to load a model saved through TransfoXLTokenizerFast, please note they are not compatible.') from e if vocab_file is not None: self.build_vocab() super().__init__(special=special, min_freq=min_freq, max_size=max_size, lower_case=lower_case, delimiter=delimiter, vocab_file=vocab_file, pretrained_vocab_file=pretrained_vocab_file, never_split=never_split, unk_token=unk_token, eos_token=eos_token, additional_special_tokens=additional_special_tokens, language=language, **kwargs) if never_split is None: never_split = self.all_special_tokens self.never_split = never_split @property def do_lower_case(self): return self.lower_case def _compile_space_around_punctuation_pattern(self): look_ahead_for_special_token = f'(?=[{self.punctuation_symbols}])' look_ahead_to_match_all_except_space = '(?=[^\\s])' return re.compile('' + look_ahead_for_special_token + look_ahead_to_match_all_except_space) def count_file(self, path, verbose=False, add_eos=False): if verbose: logger.info(f'counting file {path} ...') assert os.path.exists(path), f'Input file {path} not found' sents = [] with open(path, 'r', encoding='utf-8') as f: for (idx, line) in enumerate(f): if verbose and idx > 0 and (idx % 500000 == 0): logger.info(f' line {idx}') symbols = self.tokenize(line, add_eos=add_eos) self.counter.update(symbols) sents.append(symbols) return sents def count_sents(self, sents, verbose=False): if verbose: logger.info(f'counting {len(sents)} sents ...') for (idx, symbols) in enumerate(sents): if verbose and idx > 0 and (idx % 500000 == 0): logger.info(f' line {idx}') self.counter.update(symbols) def _build_from_file(self, vocab_file): self.idx2sym = [] self.sym2idx = OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as f: for line in f: symb = line.strip().split()[0] self.add_symbol(symb) if '' in self.sym2idx: self.unk_idx = self.sym2idx[''] elif '' in self.sym2idx: self.unk_idx = self.sym2idx[''] else: raise ValueError('Token not in vocabulary and no token in vocabulary for replacement.') def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['pretrained_vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'wb') as f: pickle.dump(self.__dict__, f) return (vocab_file,) def build_vocab(self): if self.vocab_file: logger.info(f'building vocab from {self.vocab_file}') self._build_from_file(self.vocab_file) logger.info(f'Final vocab size {len(self.sym2idx)}') else: logger.info(f'building vocab with min_freq={self.min_freq}, 
max_size={self.max_size}') self.idx2sym = [] self.sym2idx = OrderedDict() for sym in self.special: self.add_special(sym) for (sym, cnt) in self.counter.most_common(self.max_size): if cnt < self.min_freq: break self.add_symbol(sym) logger.info(f'Final vocab size {len(self.sym2idx)} from {len(self.counter)} unique tokens') @torch_only_method def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False): if verbose: logger.info(f'encoding file {path} ...') assert os.path.exists(path), f'Output file {path} not found' encoded = [] with open(path, 'r', encoding='utf-8') as f: for (idx, line) in enumerate(f): if verbose and idx > 0 and (idx % 500000 == 0): logger.info(f' line {idx}') symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos) encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded @torch_only_method def encode_sents(self, sents, ordered=False, verbose=False): if verbose: logger.info(f'encoding {len(sents)} sents ...') encoded = [] for (idx, symbols) in enumerate(sents): if verbose and idx > 0 and (idx % 500000 == 0): logger.info(f' line {idx}') encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded def add_special(self, sym): if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 setattr(self, f"{sym.strip('<>')}_idx", self.sym2idx[sym]) def add_symbol(self, sym): if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 def move_added_token(self, token: str, target_idx: int): assert token in self.added_tokens_encoder, 'Token which should be moved has to be an added token' assert token not in self.idx2sym, 'Token which should be moved is already in vocab' self.idx2sym.insert(target_idx, token) self.sym2idx[token] = target_idx for idx in range(target_idx + 1, len(self.idx2sym)): current_sym = self.idx2sym[idx] self.sym2idx[current_sym] = idx old_index = self._added_tokens_encoder.pop(token) self._added_tokens_decoder.pop(old_index) def moses_punct_norm(self, text): return self.moses_punct_normalizer.normalize(text) def moses_tokenize(self, text): return self.moses_tokenizer.tokenize(text, aggressive_dash_splits=True, return_str=False, escape=False, protected_patterns=self.never_split) def moses_pipeline(self, text: str) -> List[str]: text = self.moses_punct_norm(text) text = self.moses_tokenize(text) text = tokenize_numbers(text) return text def _convert_id_to_token(self, idx): assert 0 <= idx < len(self), f'Index {idx} out of vocabulary range' return self.idx2sym[idx] def _convert_token_to_id(self, sym): if sym in self.sym2idx: return self.sym2idx[sym] elif hasattr(self, 'unk_idx'): return self.sym2idx.get(sym, self.unk_idx) elif '' in self.sym2idx: return self.sym2idx[''] elif '' in self.sym2idx: return self.sym2idx[''] else: raise ValueError('Token not in vocabulary and no token in vocabulary for replacement.') def convert_tokens_to_string(self, tokens): out_string = self.moses_detokenizer.detokenize(tokens) return detokenize_numbers(out_string).strip() @torch_only_method def convert_to_tensor(self, symbols): return torch.LongTensor(self.convert_tokens_to_ids(symbols)) @property def vocab_size(self): return len(self.idx2sym) def get_vocab(self): vocab = self.sym2idx.copy() vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, line, add_eos=False, add_double_eos=False): line = line.strip() if self.lower_case: line = line.lower() if 
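# --- Illustrative sketch (not library source): building a tiny TransfoXLTokenizer vocabulary in
# memory through the counter + build_vocab() path above, instead of loading a checkpoint's pickled
# vocab. Requires the `sacremoses` package; the sentences, the special tokens and the deprecated
# module path are assumptions made for the demo.
from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import TransfoXLTokenizer

tok = TransfoXLTokenizer(special=["<unk>", "<eos>"], lower_case=True,
                         unk_token="<unk>", eos_token="<eos>")
for sentence in ["The quick brown fox jumps over the lazy dog .", "The dog sleeps ."]:
    tok.counter.update(tok.tokenize(sentence))
tok.build_vocab()                                  # no vocab_file set -> build from the counter
print(tok.vocab_size)
print(tok.convert_tokens_to_ids(tok.tokenize("The quick dog flies .")))   # unseen word maps to the <unk> id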
self.delimiter == '': symbols = line else: symbols = self.moses_pipeline(line) if add_double_eos: return [''] + symbols + [''] elif add_eos: return symbols + [''] else: return symbols class LMOrderedIterator: def __init__(self, data, bsz, bptt, device='cpu', ext_len=None): self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.n_step = data.size(0) // bsz data = data.narrow(0, 0, self.n_step * bsz) self.data = data.view(bsz, -1).t().contiguous().to(device) self.n_batch = (self.n_step + self.bptt - 1) // self.bptt def get_batch(self, i, bptt=None): if bptt is None: bptt = self.bptt seq_len = min(bptt, self.data.size(0) - 1 - i) end_idx = i + seq_len beg_idx = max(0, i - self.ext_len) data = self.data[beg_idx:end_idx] target = self.data[i + 1:i + 1 + seq_len] data_out = data.transpose(0, 1).contiguous().to(self.device) target_out = target.transpose(0, 1).contiguous().to(self.device) return (data_out, target_out, seq_len) def get_fixlen_iter(self, start=0): for i in range(start, self.data.size(0) - 1, self.bptt): yield self.get_batch(i) def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3): max_len = self.bptt + max_deviation * std i = start while True: bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0 bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std)))) (data, target, seq_len) = self.get_batch(i, bptt) i += seq_len yield (data, target, seq_len) if i >= self.data.size(0) - 2: break def __iter__(self): return self.get_fixlen_iter() class LMShuffledIterator: def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False): self.data = data self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.shuffle = shuffle def get_sent_stream(self): epoch_indices = np.random.permutation(len(self.data)) if self.shuffle else np.array(range(len(self.data))) for idx in epoch_indices: yield self.data[idx] @torch_only_method def stream_iterator(self, sent_stream): streams = [None] * self.bsz data = torch.LongTensor(self.bptt, self.bsz) target = torch.LongTensor(self.bptt, self.bsz) n_retain = 0 while True: data[n_retain:].fill_(-1) target.fill_(-1) valid_batch = True for i in range(self.bsz): n_filled = 0 try: while n_filled < self.bptt: if streams[i] is None or len(streams[i]) <= 1: streams[i] = next(sent_stream) n_new = min(len(streams[i]) - 1, self.bptt - n_filled) data[n_retain + n_filled:n_retain + n_filled + n_new, i] = streams[i][:n_new] target[n_filled:n_filled + n_new, i] = streams[i][1:n_new + 1] streams[i] = streams[i][n_new:] n_filled += n_new except StopIteration: valid_batch = False break if not valid_batch: return data_out = data.transpose(0, 1).contiguous().to(self.device) target_out = target.transpose(0, 1).contiguous().to(self.device) yield (data_out, target_out, self.bptt) n_retain = min(data.size(0), self.ext_len) if n_retain > 0: data[:n_retain] = data[-n_retain:] data.resize_(n_retain + self.bptt, data.size(1)) def __iter__(self): sent_stream = self.get_sent_stream() for batch in self.stream_iterator(sent_stream): yield batch class LMMultiFileIterator(LMShuffledIterator): def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None, shuffle=False): self.paths = paths self.vocab = vocab self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.shuffle = shuffle def get_sent_stream(self, path): sents = self.vocab.encode_file(path, add_double_eos=True) if 
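# --- Illustrative sketch (not library source): LMOrderedIterator slices one long token stream into
# (data, target) batches where target is data shifted by one position, which is exactly what the
# language-modeling loss above expects. The toy stream and sizes are made up; assumes the
# deprecated module path.
import torch
from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import LMOrderedIterator

stream = torch.arange(100)                          # pretend this is an encoded corpus
it = LMOrderedIterator(stream, bsz=4, bptt=10)
data, target, seq_len = next(iter(it))
print(data.shape, target.shape, seq_len)            # torch.Size([4, 10]) torch.Size([4, 10]) 10
assert torch.equal(data[:, 1:], target[:, :-1])     # target is the next-token shift of data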
self.shuffle: np.random.shuffle(sents) sent_stream = iter(sents) return sent_stream def __iter__(self): if self.shuffle: np.random.shuffle(self.paths) for path in self.paths: sent_stream = self.get_sent_stream(path) for batch in self.stream_iterator(sent_stream): yield batch class TransfoXLCorpus: @classmethod @torch_only_method def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) is_local = os.path.isdir(pretrained_model_name_or_path) try: resolved_corpus_file = cached_file(pretrained_model_name_or_path, CORPUS_NAME, cache_dir=cache_dir) except EnvironmentError: logger.error(f"Corpus '{pretrained_model_name_or_path}' was not found in corpus list ({', '.join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys())}. We assumed '{pretrained_model_name_or_path}' was a path or url but couldn't find files {CORPUS_NAME} at this path or url.") return None if is_local: logger.info(f'loading corpus file {resolved_corpus_file}') else: logger.info(f'loading corpus file {CORPUS_NAME} from cache at {resolved_corpus_file}') corpus = cls(*inputs, **kwargs) corpus_dict = torch.load(resolved_corpus_file) for (key, value) in corpus_dict.items(): corpus.__dict__[key] = value corpus.vocab = vocab if corpus.train is not None: corpus.train = torch.tensor(corpus.train, dtype=torch.long) if corpus.valid is not None: corpus.valid = torch.tensor(corpus.valid, dtype=torch.long) if corpus.test is not None: corpus.test = torch.tensor(corpus.test, dtype=torch.long) return corpus def __init__(self, *args, **kwargs): self.vocab = TransfoXLTokenizer(*args, **kwargs) self.dataset = None self.train = None self.valid = None self.test = None def build_corpus(self, path, dataset): self.dataset = dataset if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']: self.vocab.count_file(os.path.join(path, 'train.txt')) self.vocab.count_file(os.path.join(path, 'valid.txt')) self.vocab.count_file(os.path.join(path, 'test.txt')) elif self.dataset == 'wt103': self.vocab.count_file(os.path.join(path, 'train.txt')) elif self.dataset == 'lm1b': train_path_pattern = os.path.join(path, '1-billion-word-language-modeling-benchmark-r13output', 'training-monolingual.tokenized.shuffled', 'news.en-*') train_paths = glob.glob(train_path_pattern) self.vocab.build_vocab() if self.dataset in ['ptb', 'wt2', 'wt103']: self.train = self.vocab.encode_file(os.path.join(path, 'train.txt'), ordered=True) self.valid = self.vocab.encode_file(os.path.join(path, 'valid.txt'), ordered=True) self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'), ordered=True) elif self.dataset in ['enwik8', 'text8']: self.train = self.vocab.encode_file(os.path.join(path, 'train.txt'), ordered=True, add_eos=False) self.valid = self.vocab.encode_file(os.path.join(path, 'valid.txt'), ordered=True, add_eos=False) self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'), ordered=True, add_eos=False) elif self.dataset == 'lm1b': self.train = train_paths self.valid = self.vocab.encode_file(os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True) self.test = self.vocab.encode_file(os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True) def get_iterator(self, split, *args, **kwargs): if split == 'train': if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']: data_iter = LMOrderedIterator(self.train, *args, **kwargs) elif self.dataset == 'lm1b': kwargs['shuffle'] = True data_iter = LMMultiFileIterator(self.train, self.vocab, 
*args, **kwargs) elif split in ['valid', 'test']: data = self.valid if split == 'valid' else self.test if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']: data_iter = LMOrderedIterator(data, *args, **kwargs) elif self.dataset == 'lm1b': data_iter = LMShuffledIterator(data, *args, **kwargs) else: data_iter = None raise ValueError(f'Split not recognized: {split}') return data_iter @torch_only_method def get_lm_corpus(datadir, dataset): fn = os.path.join(datadir, 'cache.pt') fn_pickle = os.path.join(datadir, 'cache.pkl') if os.path.exists(fn): logger.info('Loading cached dataset...') corpus = torch.load(fn_pickle) elif os.path.exists(fn): logger.info('Loading cached dataset from pickle...') if not strtobool(os.environ.get('TRUST_REMOTE_CODE', 'False')): raise ValueError("This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially malicious. It's recommended to never unpickle data that could have come from an untrusted source, or that could have been tampered with. If you already verified the pickle data and decided to use it, you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it.") with open(fn, 'rb') as fp: corpus = pickle.load(fp) else: logger.info(f'Producing dataset {dataset}...') kwargs = {} if dataset in ['wt103', 'wt2']: kwargs['special'] = [''] kwargs['lower_case'] = False elif dataset == 'ptb': kwargs['special'] = [''] kwargs['lower_case'] = True elif dataset == 'lm1b': kwargs['special'] = [] kwargs['lower_case'] = False kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt') elif dataset in ['enwik8', 'text8']: pass corpus = TransfoXLCorpus(datadir, dataset, **kwargs) torch.save(corpus, fn) return corpus # File: transformers-main/src/transformers/models/deprecated/tvlt/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_tvlt': ['TvltConfig'], 'feature_extraction_tvlt': ['TvltFeatureExtractor'], 'processing_tvlt': ['TvltProcessor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tvlt'] = ['TvltModel', 'TvltForPreTraining', 'TvltForAudioVisualClassification', 'TvltPreTrainedModel'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_tvlt'] = ['TvltImageProcessor'] if TYPE_CHECKING: from .configuration_tvlt import TvltConfig from .processing_tvlt import TvltProcessor from .feature_extraction_tvlt import TvltFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tvlt import TvltForAudioVisualClassification, TvltForPreTraining, TvltModel, TvltPreTrainedModel try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_tvlt import TvltImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/tvlt/configuration_tvlt.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class TvltConfig(PretrainedConfig): model_type = 'tvlt' def __init__(self, image_size=224, 
spectrogram_length=2048, frequency_length=128, image_patch_size=[16, 16], audio_patch_size=[16, 16], num_image_channels=3, num_audio_channels=1, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, use_mean_pooling=False, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, pixel_mask_ratio=0.75, audio_mask_ratio=0.15, audio_mask_type='frame-level', task_matching=True, task_mae=True, loss_type='classification', **kwargs): super().__init__(**kwargs) if audio_mask_type not in ('frame-level', 'patch_level'): raise ValueError(f"audio_mask_type must be one of two acceptable strategies - {{'frame_level', 'patch-level') got {audio_mask_type}") self.image_size = image_size self.spectrogram_length = spectrogram_length self.frequency_length = frequency_length self.image_patch_size = image_patch_size self.audio_patch_size = audio_patch_size self.num_image_channels = num_image_channels self.num_audio_channels = num_audio_channels self.num_frames = num_frames self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.use_mean_pooling = use_mean_pooling self.decoder_num_attention_heads = decoder_num_attention_heads self.decoder_hidden_size = decoder_hidden_size self.decoder_num_hidden_layers = decoder_num_hidden_layers self.decoder_intermediate_size = decoder_intermediate_size self.pixel_mask_ratio = pixel_mask_ratio self.audio_mask_ratio = audio_mask_ratio self.audio_mask_type = audio_mask_type self.task_matching = task_matching self.task_mae = task_mae self.loss_type = loss_type # File: transformers-main/src/transformers/models/deprecated/tvlt/feature_extraction_tvlt.py """""" from math import ceil from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, spectrogram, window_function from ....feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ....utils import TensorType, logging logger = logging.get_logger(__name__) class TvltFeatureExtractor(SequenceFeatureExtractor): model_input_names = ['audio_values', 'audio_mask'] def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.spectrogram_length = spectrogram_length self.num_channels = num_channels self.patch_size = patch_size self.freq_len = feature_size // self.patch_size[1] self.n_fft = n_fft self.hop_length = sampling_rate // hop_length_to_sampling_rate self.sampling_rate = sampling_rate self.padding_value = padding_value self.mel_filters = mel_filter_bank(num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney').T def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray: log_spec = 
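# --- Illustrative sketch (not library source): TvltConfig bundles the video/audio patching and
# masking hyper-parameters consumed by the TVLT feature extractor, image processor and model in the
# surrounding files. Assumes the deprecated module path; the overridden values are arbitrary.
from transformers.models.deprecated.tvlt.configuration_tvlt import TvltConfig

config = TvltConfig(num_frames=4, pixel_mask_ratio=0.75, audio_mask_ratio=0.15)
patches_per_frame = (config.image_size // config.image_patch_size[0]) ** 2   # e.g. (224 // 16) ** 2 = 196
print(patches_per_frame, config.audio_patch_size, config.audio_mask_type)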
spectrogram(waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0) log_spec = log_spec[:, :-1] log_spec = log_spec - 20.0 log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0 return log_spec def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], return_tensors: Optional[Union[str, TensorType]]=None, return_attention_mask: Optional[bool]=True, sampling_rate: Optional[int]=None, resample: bool=False, mask_audio: bool=False, **kwargs) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError(f'This feature extractor is set to support sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning('It is strongly recommended to pass the `sampling_rate` argument to this function. Failing to do so can result in silent errors that might be hard to debug.') is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}') is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list))) if is_batched: raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech] elif not is_batched and (not isinstance(raw_speech, np.ndarray)): raw_speech = np.asarray(raw_speech, dtype=np.float32) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float32) if not is_batched: raw_speech = [np.asarray([raw_speech]).T] audio_features = [self._np_extract_fbank_features(waveform.squeeze()).T[:self.spectrogram_length] for waveform in raw_speech] if isinstance(audio_features[0], List): audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features] max_patch_len = max([ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]) if return_attention_mask: audio_mask = [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0] for feature in audio_features] audio_mask = np.array(audio_mask).astype(np.float32) max_time_len = max_patch_len // self.freq_len * self.patch_size[0] padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32) padded_audio_features = padded_audio_features * self.padding_value for i in range(len(audio_features)): feature = audio_features[i] padded_audio_features[i, :, :feature.shape[0], :] = feature if return_attention_mask: data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: data = {'audio_values': padded_audio_features} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs # File: transformers-main/src/transformers/models/deprecated/tvlt/image_processing_tvlt.py """""" from typing import Dict, List, Optional, Union import numpy as np from ....image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ....image_transforms import get_resize_output_image_size, resize, to_channel_dimension_format from ....image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, 
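# --- Illustrative sketch (not library source): TvltFeatureExtractor turns a raw waveform into a
# padded log-mel spectrogram (`audio_values`) plus a patch-level `audio_mask`, as implemented above.
# Assumes the deprecated module path; the one-second random waveform is a stand-in for real audio.
import numpy as np
from transformers.models.deprecated.tvlt.feature_extraction_tvlt import TvltFeatureExtractor

extractor = TvltFeatureExtractor()
waveform = np.random.randn(44100).astype(np.float32)          # ~1 s of audio at 44.1 kHz
features = extractor(waveform, sampling_rate=44100, return_tensors="np")
print(features["audio_values"].shape)     # (batch, 1, padded_time_frames, 128 mel bins)
print(features["audio_mask"].shape)       # (batch, num_audio_patches): 1 = real patch, 0 = padding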
PILImageResampling, infer_channel_dimension_format, is_scaled_image, is_valid_image, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments from ....utils import TensorType, logging logger = logging.get_logger(__name__) def make_batched(videos) -> List[List[ImageInput]]: if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)): return videos elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]): videos_dim = np.array(videos[0]).ndim if videos_dim == 3: return [videos] elif videos_dim == 4: return videos elif is_valid_image(videos): videos_dim = np.array(videos).ndim if videos_dim == 3: return [[videos]] elif videos_dim == 4: return [videos] elif videos_dim == 5: return videos raise ValueError(f'Could not make batched video from {videos}') class TvltImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values', 'pixel_mask', 'pixel_values_mixed', 'pixel_mask_mixed'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, patch_size: List[int]=[16, 16], num_frames: int=8, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=True, crop_size: Dict[str, int]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=IMAGENET_STANDARD_MEAN, image_std: Optional[Union[float, List[float]]]=IMAGENET_STANDARD_STD, init_mask_generator=False, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224} crop_size = get_size_dict(crop_size, param_name='crop_size') self.do_resize = do_resize self.size = size self.patch_size = patch_size self.num_frames = num_frames self.do_center_crop = do_center_crop self.crop_size = crop_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self._valid_processor_keys = ['videos', 'do_resize', 'size', 'patch_size', 'num_frames', 'resample', 'do_center_crop', 'crop_size', 'do_rescale', 'rescale_factor', 'do_normalize', 'image_mean', 'image_std', 'is_mixed', 'return_tensors', 'data_format', 'input_data_format'] def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size, default_to_square=False) if 'shortest_edge' in size: output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False, input_data_format=input_data_format) elif 'height' in size and 'width' in size: output_size = (size['height'], size['width']) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}") return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def _preprocess_image(self, image: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[str, int]=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) image = to_numpy_array(image) if is_scaled_image(image) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def preprocess(self, videos: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, patch_size: List[int]=None, num_frames: int=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[str, int]=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, is_mixed: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> BatchFeature: do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name='crop_size') patch_size = patch_size if patch_size is not None else self.patch_size num_frames = num_frames if patch_size is not None else self.num_frames validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) if not 
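# --- Illustrative sketch (not part of the original file) ---------------------
# How the `preprocess` method continuing below builds `pixel_mask`: every frame
# contributes (shortest_edge // patch)**2 patch tokens, real frames are marked
# with 1 and frames missing up to the longest video in the batch with 0.
# Toy values below match the defaults used above (224 px, 16 px patches):
import numpy as np

shortest_edge, patch = 224, 16
num_patches_per_image = (shortest_edge // patch) ** 2          # 196
video_lengths = [8, 5]                                         # frames per video
max_num_frames = max(video_lengths)

pixel_mask = np.array(
    [n * num_patches_per_image * [1]
     + (max_num_frames - n) * num_patches_per_image * [0]
     for n in video_lengths]
)
# pixel_mask.shape == (2, 8 * 196); the second row ends with 3 * 196 zeros.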
valid_images(videos): raise ValueError('Invalid image or video type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') videos = make_batched(videos) for video in videos: if len(video) > self.num_frames: raise ValueError(f'number of frames must not be greater than the maximum frames of the model {self.num_frames}.') max_num_frames = max([len(video) for video in videos]) num_patches_per_image = (size['shortest_edge'] // patch_size[0]) ** 2 video_masks = np.array([len(video) * num_patches_per_image * [1] + (max_num_frames - len(video)) * num_patches_per_image * [0] for video in videos]) videos = [[self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for img in video] for video in videos] if is_mixed: data = {'pixel_values_mixed': videos, 'pixel_mask_mixed': video_masks} else: data = {'pixel_values': videos, 'pixel_mask': video_masks} return BatchFeature(data=data, tensor_type=return_tensors) # File: transformers-main/src/transformers/models/deprecated/tvlt/modeling_tvlt.py """""" import collections.abc import math from copy import deepcopy from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....activations import ACT2FN from ....modeling_outputs import BaseModelOutput, SequenceClassifierOutput from ....modeling_utils import PreTrainedModel from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ....utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_tvlt import TvltConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'TvltConfig' _CHECKPOINT_FOR_DOC = 'ZinengTang/tvlt-base' @dataclass class TvltModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None last_pixel_hidden_state: torch.FloatTensor = None last_audio_hidden_state: torch.FloatTensor = None pixel_label_masks: torch.LongTensor = None audio_label_masks: torch.LongTensor = None pixel_ids_restore: torch.LongTensor = None audio_ids_restore: torch.LongTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class TvltDecoderOutput(ModelOutput): logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class TvltForPreTrainingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None matching_logits: torch.FloatTensor = None pixel_logits: torch.FloatTensor = None audio_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None def generate_pixel_mask_noise(pixel_values, pixel_mask=None, mask_ratio=0.75): (batch_size, seq_len) = pixel_values.shape[:2] noise = torch.rand((batch_size, seq_len), device=pixel_values.device) len_keep = int(seq_len * (1 - mask_ratio)) return (noise, len_keep) def generate_audio_mask_noise(audio_values, audio_mask=None, mask_ratio=0.75, mask_type='patch-level', freq_len=8): (batch_size, seq_len) = audio_values.shape[:2] if 
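# --- Illustrative sketch (not part of the original file) ---------------------
# The "frame-level" branch of `generate_audio_mask_noise` (continued just
# below) draws one noise value per time frame and repeats it over the freq_len
# frequency patches of that frame, so a whole spectrogram column is kept or
# masked together. A toy reproduction of that reshaping:
import torch

batch, freq_len, num_time_patches = 1, 4, 3
seq_len = freq_len * num_time_patches

noise = torch.rand(batch, num_time_patches).unsqueeze(-1).repeat(1, 1, freq_len)
noise = noise.view(batch, seq_len)
# consecutive blocks of `freq_len` entries share the same value
assert torch.allclose(noise[0, :freq_len], noise[0, 0].expand(freq_len))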
mask_type == 'frame-level': num_time_patches = seq_len // freq_len noise = torch.rand(batch_size, num_time_patches, device=audio_values.device).unsqueeze(-1).repeat(1, 1, freq_len).view(batch_size, seq_len) elif mask_type == 'patch-level': noise = torch.rand(batch_size, seq_len, device=audio_values.device) len_keep = int(seq_len * (1 - mask_ratio)) return (noise, len_keep) def random_masking(sequence, noise, len_keep, attention_masks=None): (batch_size, seq_len, hidden_dim) = sequence.shape ids_shuffle = torch.argsort(noise, dim=1) ids_restore = torch.argsort(ids_shuffle, dim=1) ids_keep = ids_shuffle[:, :len_keep] sequence_masked = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, hidden_dim)) label_masks = torch.ones([batch_size, seq_len], device=sequence.device) label_masks[:, :len_keep] = 0 label_masks = torch.gather(label_masks, dim=1, index=ids_restore) if attention_masks is not None: label_masks *= attention_masks attention_masks = torch.gather(attention_masks, dim=1, index=ids_keep) return (sequence_masked, attention_masks, label_masks, ids_restore) class TvltPixelEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.patch_embeddings = TvltPixelPatchEmbeddings(config) self.num_patches_per_image = self.patch_embeddings.num_patches_per_image self.type_embed_v = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.temporal_embed = nn.Parameter(torch.zeros(1, config.num_frames, config.hidden_size)) self.pos_embed_v = nn.Parameter(torch.zeros(1, self.num_patches_per_image, config.hidden_size)) self.config = config def forward(self, pixel_values, attention_masks=None): (batch_size, num_frames, num_channels, height, width) = pixel_values.shape embeddings = self.patch_embeddings(pixel_values) embeddings += self.pos_embed_v.repeat(1, num_frames, 1) embeddings += torch.repeat_interleave(self.temporal_embed[:, :num_frames], self.num_patches_per_image, dim=1) embeddings += self.type_embed_v return (embeddings, attention_masks) class TvltAudioEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.patch_embeddings = TvltAudioPatchEmbeddings(config) self.num_patches = self.patch_embeddings.num_patches self.type_embed_a = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.num_freq_patches = config.frequency_length // config.audio_patch_size[1] self.pos_embed_a = nn.Parameter(torch.zeros(1, self.num_patches // self.num_freq_patches, config.hidden_size)) self.freq_embed = nn.Parameter(torch.zeros(1, self.num_freq_patches, config.hidden_size)) self.num_freq_patches = config.frequency_length // config.audio_patch_size[1] self.config = config def forward(self, audio_values, attention_masks=None): embeddings = self.patch_embeddings(audio_values) num_time_patches = embeddings.size(1) // self.num_freq_patches embeddings += self.freq_embed.repeat(1, num_time_patches, 1) embeddings += torch.repeat_interleave(self.pos_embed_a[:, :num_time_patches], self.num_freq_patches, dim=1) embeddings += self.type_embed_a return (embeddings, attention_masks) class TvltPixelPatchEmbeddings(nn.Module): def __init__(self, config): super().__init__() (image_size, patch_size) = (config.image_size, config.image_patch_size) (num_channels, hidden_size) = (config.num_image_channels, config.hidden_size) image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches_per_image = image_size[1] // 
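# --- Illustrative sketch (not part of the original file) ---------------------
# A self-contained rerun of the argsort trick used by `random_masking` above:
# sorting per-token noise yields a random permutation, the first `len_keep`
# indices survive, and argsort of the permutation (`ids_restore`) lets the
# decoder later put mask tokens back at their original positions.
import torch

torch.manual_seed(0)
batch, seq_len, dim, mask_ratio = 2, 8, 4, 0.75
sequence = torch.randn(batch, seq_len, dim)
noise = torch.rand(batch, seq_len)

len_keep = int(seq_len * (1 - mask_ratio))                     # 2 tokens kept
ids_shuffle = torch.argsort(noise, dim=1)
ids_restore = torch.argsort(ids_shuffle, dim=1)
ids_keep = ids_shuffle[:, :len_keep]
kept = torch.gather(sequence, 1, ids_keep.unsqueeze(-1).expand(-1, -1, dim))

label_masks = torch.ones(batch, seq_len)
label_masks[:, :len_keep] = 0
label_masks = torch.gather(label_masks, 1, ids_restore)        # 1 marks a masked position
assert kept.shape == (batch, len_keep, dim)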
patch_size[1] * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches_per_image = num_patches_per_image self.hidden_size = hidden_size self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: (batch_size, num_frames, num_channels, height, width) = pixel_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') if height != self.image_size[0] or width != self.image_size[1]: raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).") pixel_values = pixel_values.reshape(batch_size * num_frames, num_channels, height, width) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) embeddings = embeddings.reshape(batch_size, num_frames * self.num_patches_per_image, self.hidden_size) return embeddings class TvltAudioPatchEmbeddings(nn.Module): def __init__(self, config): super().__init__() (spectrogram_length, frequency_length, patch_size) = (config.spectrogram_length, config.frequency_length, config.audio_patch_size) (num_channels, hidden_size) = (config.num_audio_channels, config.hidden_size) spectrogram_size = (spectrogram_length, frequency_length) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = spectrogram_size[1] // patch_size[1] * (spectrogram_size[0] // patch_size[0]) patch_shape = (spectrogram_size[0] // patch_size[0], spectrogram_size[1] // patch_size[1]) self.spectrogram_size = spectrogram_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.patch_shape = patch_shape self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, audio_values: torch.Tensor) -> torch.Tensor: (batch_size, num_channels, height, width) = audio_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') if height > self.spectrogram_size[0] or width != self.spectrogram_size[1]: raise ValueError(f"Input audio size ({height}*{width}) doesn't match model ({self.spectrogram_size[0]}*{self.spectrogram_size[1]}).") embeddings = self.projection(audio_values).flatten(2).transpose(1, 2) return embeddings class TvltSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size {(config.hidden_size,)} is not a multiple of the number of attention heads {config.num_attention_heads}.') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = 
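# --- Illustrative sketch (not part of the original file) ---------------------
# Shape walk-through of the patch projection used by the embedding classes
# above: a Conv2d whose kernel and stride both equal the patch size turns each
# non-overlapping patch into one token. Sizes are toy values (224 px frames,
# 16 px patches, hidden_size=768) chosen to match the usual defaults:
import torch
from torch import nn

batch, frames, channels, height, width = 2, 4, 3, 224, 224
hidden_size, patch = 768, 16

projection = nn.Conv2d(channels, hidden_size, kernel_size=patch, stride=patch)
pixel_values = torch.randn(batch, frames, channels, height, width)

x = pixel_values.reshape(batch * frames, channels, height, width)
x = projection(x)                        # (8, 768, 14, 14)
x = x.flatten(2).transpose(1, 2)         # (8, 196, 768): one token per patch
x = x.reshape(batch, frames * 196, hidden_size)
assert x.shape == (2, 4 * 196, 768)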
x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class TvltSelfOutput(nn.Module): def __init__(self, config: TvltConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class TvltAttention(nn.Module): def __init__(self, config): super().__init__() self.attention = TvltSelfAttention(config) self.output = TvltSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): self_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class TvltIntermediate(nn.Module): def __init__(self, config: TvltConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class TvltOutput(nn.Module): def __init__(self, config: TvltConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, 
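# --- Illustrative sketch (not part of the original file) ---------------------
# The self-attention above is the standard scaled dot-product form,
# softmax(Q K^T / sqrt(d_head)) V, computed per head after splitting the
# hidden dimension into (num_heads, head_size). Toy sizes for illustration:
import math
import torch

batch, seq_len, num_heads, head_size = 2, 10, 12, 64
hidden = num_heads * head_size

q = torch.randn(batch, num_heads, seq_len, head_size)
k = torch.randn(batch, num_heads, seq_len, head_size)
v = torch.randn(batch, num_heads, seq_len, head_size)

scores = q @ k.transpose(-1, -2) / math.sqrt(head_size)   # (2, 12, 10, 10)
probs = scores.softmax(dim=-1)
context = probs @ v                                        # (2, 12, 10, 64)
context = context.permute(0, 2, 1, 3).reshape(batch, seq_len, hidden)
assert context.shape == (2, 10, 768)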
input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class TvltLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = TvltAttention(config) self.intermediate = TvltIntermediate(config) self.output = TvltOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False): self_attention_outputs = self.attention(self.layernorm_before(hidden_states), attention_mask, head_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] hidden_states = attention_output + hidden_states.to(attention_output.device) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs class TvltEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([TvltLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class TvltPreTrainedModel(PreTrainedModel): config_class = TvltConfig base_model_prefix = 'tvlt' main_input_name = 'pixel_values' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) TVLT_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`TvltConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' TVLT_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for\n details.\n\n audio_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Audio values. Audio values can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for\n details.\n\n pixel_mask (`torch.FloatTensor` of shape `(batch_size, num_pixel_patches)`):\n Pixel masks. Pixel masks can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for\n details.\n\n audio_mask (`torch.FloatTensor` of shape `(batch_size, num_audio_patches)`):\n Audio masks. Audio masks can be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for\n details.\n\n pixel_values_mixed (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):\n Pixel values that mix positive and negative samples in Tvlt vision-audio matching. Pixel values mixed can\n be obtained using [`TvltProcessor`]. See [`TvltProcessor.__call__`] for details.\n\n pixel_mask_mixed (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel masks of pixel_values_mixed. Pixel masks mixed can be obtained using [`TvltProcessor`]. See\n [`TvltProcessor.__call__`] for details.\n\n mask_pixel (`bool`, *optional*):\n Whether to mask pixel for MAE tasks. Only set to True in TvltForPreTraining.\n\n mask_audio (`bool`, *optional*):\n Whether to mask audio for MAE tasks. Only set to True in TvltForPreTraining.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare TVLT Model transformer outputting raw hidden-states without any specific head on top.', TVLT_START_DOCSTRING) class TvltModel(TvltPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.pixel_embeddings = TvltPixelEmbeddings(config) self.audio_embeddings = TvltAudioEmbeddings(config) self.encoder = TvltEncoder(config) self.cls_embedding = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if config.use_mean_pooling: self.layernorm = None else: self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_init() def get_input_embeddings(self): return (self.pixel_embeddings.patch_embeddings, self.audio_embeddings.patch_embeddings) def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(TVLT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TvltModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.FloatTensor, audio_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, audio_mask: Optional[torch.FloatTensor]=None, mask_pixel: bool=False, mask_audio: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], TvltModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict (pixel_embedding_output, pixel_mask) = self.pixel_embeddings(pixel_values, pixel_mask) (audio_embedding_output, audio_mask) = self.audio_embeddings(audio_values, audio_mask) pixel_label_masks = None pixel_ids_restore = None if mask_pixel: (pixel_mask_noise, pixel_len_keep) = generate_pixel_mask_noise(pixel_embedding_output, pixel_mask=pixel_mask, mask_ratio=self.config.pixel_mask_ratio) (pixel_embedding_output, pixel_mask, pixel_label_masks, pixel_ids_restore) = random_masking(pixel_embedding_output, pixel_mask_noise, pixel_len_keep, attention_masks=pixel_mask) audio_label_masks = None audio_ids_restore = None if mask_audio: num_freq_patches = self.config.frequency_length // self.config.audio_patch_size[1] (audio_mask_noise, audio_len_keep) = generate_audio_mask_noise(audio_embedding_output, audio_mask=audio_mask, mask_ratio=self.config.audio_mask_ratio, mask_type=self.config.audio_mask_type, freq_len=num_freq_patches) (audio_embedding_output, audio_mask, audio_label_masks, audio_ids_restore) = random_masking(audio_embedding_output, audio_mask_noise, audio_len_keep, attention_masks=audio_mask) batch_size = pixel_values.size(0) embedding_output = torch.cat([self.cls_embedding.repeat(batch_size, 1, 1), pixel_embedding_output, audio_embedding_output], 1) masked_pixel_len = pixel_embedding_output.size(1) attention_mask = None if pixel_mask is not None and audio_mask is not None: attention_mask = torch.cat([pixel_mask[:, :1], pixel_mask, audio_mask], 1) input_shape = embedding_output.size() extended_attention_mask = None if attention_mask is not None: extended_attention_mask = 
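# --- Illustrative sketch (not part of the original file) ---------------------
# Token layout inside TvltModel.forward: one CLS embedding, then the (possibly
# masked) pixel tokens, then the audio tokens; the slicing just below uses the
# same layout to split the encoder output back apart. Toy counts only:
num_pixel_tokens, num_audio_tokens = 784, 256                  # assumed values
seq = ["CLS"] + ["pix"] * num_pixel_tokens + ["aud"] * num_audio_tokens

pixel_slice = slice(1, 1 + num_pixel_tokens)                   # [:, 1:1 + masked_pixel_len]
audio_slice = slice(1 + num_pixel_tokens, None)                # [:, 1 + masked_pixel_len:]
assert seq[pixel_slice].count("pix") == num_pixel_tokens
assert seq[audio_slice].count("aud") == num_audio_tokens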
self.get_extended_attention_mask(attention_mask, input_shape) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] if self.layernorm is not None: sequence_output = self.layernorm(sequence_output) pixel_sequence_output = sequence_output[:, 1:1 + masked_pixel_len] audio_sequence_output = sequence_output[:, 1 + masked_pixel_len:] if not return_dict: return (sequence_output, pixel_sequence_output, audio_sequence_output, pixel_label_masks, audio_label_masks, pixel_ids_restore, audio_ids_restore) + encoder_outputs[1:] return TvltModelOutput(last_hidden_state=sequence_output, last_pixel_hidden_state=pixel_sequence_output, last_audio_hidden_state=audio_sequence_output, pixel_label_masks=pixel_label_masks, audio_label_masks=audio_label_masks, pixel_ids_restore=pixel_ids_restore, audio_ids_restore=audio_ids_restore, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class TvltDecoder(nn.Module): def __init__(self, config): super().__init__() decoder_config = deepcopy(config) decoder_config.hidden_size = config.decoder_hidden_size decoder_config.num_hidden_layers = config.decoder_num_hidden_layers decoder_config.num_attention_heads = config.decoder_num_attention_heads decoder_config.intermediate_size = config.decoder_intermediate_size self.decoder_layers = nn.ModuleList([TvltLayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)]) self.layernorm = nn.LayerNorm(config.decoder_hidden_size, eps=config.layer_norm_eps) self.gradient_checkpointing = False self.config = config def forward(self, hidden_states, output_attentions=False, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for (i, layer_module) in enumerate(self.decoder_layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, None, output_attentions) else: layer_outputs = layer_module(hidden_states, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) logits = self.layernorm(hidden_states) if not return_dict: return tuple((v for v in [logits, all_hidden_states, all_self_attentions] if v is not None)) return TvltDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions) @add_start_docstrings('The TVLT Model transformer with the decoder on top for self-supervised pre-training.', TVLT_START_DOCSTRING) class TvltForPreTraining(TvltPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.task_matching = config.task_matching self.task_mae = config.task_mae if not (self.task_matching or self.task_mae): raise ValueError('Must set at least one of matching task and MAE task to true') self.tvlt = TvltModel(config) if self.task_matching: self.matching_head = TvltMatchingHead(config) if self.task_mae: self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=True) self.pixel_mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size)) self.audio_mask_token = 
nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size)) self.decoder = TvltDecoder(config) decoder_hidden_size = config.decoder_hidden_size num_frames = config.num_frames num_patches_per_image = self.tvlt.pixel_embeddings.num_patches_per_image self.decoder_pixel_pos_embed = nn.Parameter(torch.zeros(1, num_patches_per_image, decoder_hidden_size)) self.decoder_temporal_embed = nn.Parameter(torch.zeros(1, config.num_frames, decoder_hidden_size)) self.decoder_pixel_type_embed = nn.Parameter(torch.zeros(1, 1, decoder_hidden_size)) num_audio_patches = self.tvlt.audio_embeddings.num_patches num_freq_patches = config.frequency_length // config.audio_patch_size[1] self.decoder_audio_pos_embed = nn.Parameter(torch.zeros(1, num_audio_patches // num_freq_patches, decoder_hidden_size)) self.decoder_freq_embed = nn.Parameter(torch.zeros(1, num_freq_patches, decoder_hidden_size)) self.decoder_audio_type_embed = nn.Parameter(torch.zeros(1, 1, decoder_hidden_size)) pixel_mae_output_dim = self.config.image_patch_size[0] ** 2 * self.config.num_image_channels self.pixel_mae_head = TvltMAEHead(config, pixel_mae_output_dim) audio_mae_output_dim = self.config.audio_patch_size[0] * self.config.audio_patch_size[1] * self.config.num_audio_channels self.audio_mae_head = TvltMAEHead(config, audio_mae_output_dim) self.num_frames = num_frames self.num_patches_per_image = num_patches_per_image self.num_freq_patches = num_freq_patches self.image_patch_size = config.image_patch_size self.audio_patch_size = config.audio_patch_size self.post_init() def patchify_pixel(self, pixel_values): (batch_size, num_frames, num_channels, height, width) = pixel_values.shape num_patches_height = pixel_values.shape[3] // self.image_patch_size[0] num_patches_width = pixel_values.shape[4] // self.image_patch_size[1] patchified_pixel_values = pixel_values.reshape(shape=(batch_size, num_frames, num_channels, num_patches_height, self.image_patch_size[0], num_patches_width, self.image_patch_size[1])) patchified_pixel_values = torch.einsum('ntchpwq->nthwpqc', patchified_pixel_values) patchified_pixel_values = patchified_pixel_values.reshape(shape=(batch_size, num_patches_height * num_patches_width * num_frames, self.image_patch_size[0] * self.image_patch_size[1] * num_channels)) return patchified_pixel_values def patchify_audio(self, audio_values): (batch_size, num_channels, height, width) = audio_values.shape num_patches_height = height // self.audio_patch_size[0] num_patches_width = width // self.audio_patch_size[1] patchified_audio_values = audio_values.reshape(shape=(batch_size, num_channels, num_patches_height, self.audio_patch_size[0], num_patches_width, self.audio_patch_size[1])) patchified_audio_values = torch.einsum('nchpwq->nhwpqc', patchified_audio_values) patchified_audio_values = patchified_audio_values.reshape(shape=(batch_size, num_patches_height * num_patches_width, self.audio_patch_size[0] * self.audio_patch_size[1] * num_channels)) return patchified_audio_values def pixel_mae_loss(self, pixel_values, pixel_predictions, mask): patchified_pixel_values = self.patchify_pixel(pixel_values) loss = (pixel_predictions - patchified_pixel_values) ** 2 loss = loss.mean(dim=-1) loss = (loss * mask).sum() / mask.sum() return loss def audio_mae_loss(self, audio_values, audio_predictions, mask): patchified_audio_values = self.patchify_audio(audio_values) loss = (audio_predictions - patchified_audio_values) ** 2 loss = loss.mean(dim=-1) loss = (loss * mask).sum() / mask.sum() return loss def concatenate_mask(self, mask_token, sequence, 
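# --- Illustrative sketch (not part of the original file) ---------------------
# The MAE losses above compare decoder predictions against *patchified*
# targets and average the per-patch MSE only over masked patches (a label
# mask of 1 marks a masked position). A minimal rerun of that reduction:
import torch

batch, num_patches, patch_dim = 2, 6, 8
predictions = torch.randn(batch, num_patches, patch_dim)
targets = torch.randn(batch, num_patches, patch_dim)
mask = torch.tensor([[1., 1., 0., 0., 0., 0.],
                     [1., 0., 0., 0., 0., 1.]])                # toy label masks

loss = (predictions - targets) ** 2
loss = loss.mean(dim=-1)                                       # per-patch MSE
loss = (loss * mask).sum() / mask.sum()                        # masked mean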
ids_restore): (batch_size, seq_length, dim) = sequence.shape mask_tokens = mask_token.repeat(batch_size, ids_restore.shape[1] - seq_length, 1) padded_sequence = torch.cat([sequence, mask_tokens], dim=1) padded_sequence = torch.gather(padded_sequence, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, dim)) return padded_sequence @add_start_docstrings_to_model_forward(TVLT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TvltForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.FloatTensor, audio_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, audio_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, pixel_values_mixed: Optional[torch.FloatTensor]=None, pixel_mask_mixed: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], TvltForPreTrainingOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict total_loss = 0.0 if self.task_matching: if labels is None: raise ValueError('Matching task requires labels') if pixel_values_mixed is None: raise ValueError('Matching task requires pixel_values_mixed') outputs = self.tvlt(pixel_values_mixed, audio_values, pixel_mask=pixel_mask_mixed, audio_mask=audio_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] matching_logits = self.matching_head(sequence_output) loss_fct = BCEWithLogitsLoss() loss = loss_fct(matching_logits.view(-1), labels.view(-1)) total_loss += loss pixel_logits = None audio_logits = None if self.task_mae and self.training: outputs = self.tvlt(pixel_values, audio_values, pixel_mask=pixel_mask, audio_mask=audio_mask, mask_pixel=True, mask_audio=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pixel_sequence_output = outputs.last_pixel_hidden_state if return_dict else outputs[1] audio_sequence_output = outputs.last_audio_hidden_state if return_dict else outputs[2] pixel_label_masks = outputs.pixel_label_masks if return_dict else outputs[3] audio_label_masks = outputs.audio_label_masks if return_dict else outputs[4] pixel_ids_restore = outputs.pixel_ids_restore if return_dict else outputs[5] audio_ids_restore = outputs.audio_ids_restore if return_dict else outputs[6] pixel_decoder_input = self.encoder_to_decoder(pixel_sequence_output) audio_decoder_input = self.encoder_to_decoder(audio_sequence_output) num_frames = pixel_values.size(1) pixel_decoder_input = self.concatenate_mask(self.pixel_mask_token, pixel_decoder_input, pixel_ids_restore) pixel_decoder_input = pixel_decoder_input + self.decoder_pixel_pos_embed.repeat(1, num_frames, 1) pixel_decoder_input = pixel_decoder_input + torch.repeat_interleave(self.decoder_temporal_embed[:, :num_frames], self.num_patches_per_image, dim=1) pixel_decoder_input = pixel_decoder_input + self.decoder_pixel_type_embed pixel_decoder_outputs = self.decoder(pixel_decoder_input) pixel_logits = self.pixel_mae_head(pixel_decoder_outputs.logits) audio_decoder_input = self.concatenate_mask(self.audio_mask_token, audio_decoder_input, audio_ids_restore) num_time_patches = audio_decoder_input.size(1) // self.num_freq_patches audio_decoder_input = audio_decoder_input + self.decoder_freq_embed.repeat(1, num_time_patches, 1) audio_decoder_input = audio_decoder_input + 
torch.repeat_interleave(self.decoder_audio_pos_embed[:, :num_time_patches], self.num_freq_patches, dim=1) audio_decoder_input = audio_decoder_input + self.decoder_audio_type_embed audio_decoder_outputs = self.decoder(audio_decoder_input) audio_logits = self.audio_mae_head(audio_decoder_outputs.logits) loss = self.pixel_mae_loss(pixel_values, pixel_logits, pixel_label_masks) + self.audio_mae_loss(audio_values, audio_logits, audio_label_masks) total_loss += loss if not return_dict: output = (matching_logits, pixel_logits, audio_logits) + outputs[7:] return (total_loss,) + output if loss is not None else output return TvltForPreTrainingOutput(loss=total_loss, matching_logits=matching_logits, pixel_logits=pixel_logits, audio_logits=audio_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class TvltPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class TvltMatchingHead(nn.Module): def __init__(self, config): super().__init__() self.pooler = TvltPooler(config) self.fc = nn.Linear(config.hidden_size, 1) def forward(self, hidden_states): hidden_states = self.fc(self.pooler(hidden_states)) return hidden_states class TvltMAEHead(nn.Module): def __init__(self, config, output_dim=None): super().__init__() self.config = config self.decoder = nn.Linear(config.decoder_hidden_size, output_dim) def forward(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states @add_start_docstrings('\n Tvlt Model transformer with a classifier head on top (an MLP on top of the final hidden state of the [CLS] token)\n for audiovisual classification tasks, e.g. 
CMU-MOSEI Sentiment Analysis and Audio to Video Retrieval.\n ', TVLT_START_DOCSTRING) class TvltForAudioVisualClassification(TvltPreTrainedModel): def __init__(self, config): super().__init__(config) self.tvlt = TvltModel(config) self.classifier = nn.Sequential(nn.Linear(config.hidden_size, config.hidden_size * 2), nn.LayerNorm(config.hidden_size * 2, eps=config.layer_norm_eps), nn.GELU(), nn.Linear(config.hidden_size * 2, config.num_labels)) self.config = config self.post_init() @add_start_docstrings_to_model_forward(TVLT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.FloatTensor, audio_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, audio_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.FloatTensor], SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tvlt(pixel_values, audio_values, pixel_mask=pixel_mask, audio_mask=audio_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0][:, 0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.loss_type == 'regression': loss_fct = MSELoss() loss = loss_fct(logits, labels) elif self.config.loss_type == 'classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[4:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/deprecated/tvlt/processing_tvlt.py """""" from ....processing_utils import ProcessorMixin class TvltProcessor(ProcessorMixin): attributes = ['image_processor', 'feature_extractor'] image_processor_class = 'TvltImageProcessor' feature_extractor_class = 'TvltFeatureExtractor' def __init__(self, image_processor, feature_extractor): super().__init__(image_processor=image_processor, feature_extractor=feature_extractor) self.image_processor = image_processor self.feature_extractor = feature_extractor def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs): if images is None and audio is None: raise ValueError('You need to specify either an `images` or `audio` input to process.') images_mixed_dict = None if images is not None: images_dict = self.image_processor(images, *args, mask_pixel=mask_pixel, **kwargs) if images_mixed is not None: images_mixed_dict = self.image_processor(images_mixed, *args, is_mixed=True, **kwargs) if audio is not None: audio_dict = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs) output_dict = {} if audio is not None: output_dict.update(audio_dict) if images is not None: output_dict.update(images_dict) if images_mixed_dict is not None: output_dict.update(images_mixed_dict) return output_dict @property def model_input_names(self): image_processor_input_names = self.image_processor.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names)) # 
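# --- Illustrative sketch (not part of the original file) ---------------------
# Hypothetical end-to-end use of the TvltProcessor defined above, assuming a
# transformers version that still exposes the deprecated TVLT classes and that
# the 'ZinengTang/tvlt-base' checkpoint referenced earlier is reachable.
# The video frames and waveform are random stand-ins, not real data:
import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
video = list((np.random.rand(8, 224, 224, 3) * 255).astype(np.uint8))  # 8 frames
audio = np.random.rand(44100 * 2).astype(np.float32)                   # ~2 s of mono audio

# sampling_rate must match the checkpoint's feature extractor (assumed 44100 here)
inputs = processor(images=video, audio=audio, sampling_rate=44100, return_tensors="pt")
# `inputs` then carries pixel_values / pixel_mask / audio_values / audio_mask,
# i.e. the model_input_names advertised by the two sub-processors above.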
File: transformers-main/src/transformers/models/deprecated/van/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_van': ['VanConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_van'] = ['VanForImageClassification', 'VanModel', 'VanPreTrainedModel'] if TYPE_CHECKING: from .configuration_van import VanConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_van import VanForImageClassification, VanModel, VanPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) # File: transformers-main/src/transformers/models/deprecated/van/configuration_van.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class VanConfig(PretrainedConfig): model_type = 'van' def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-06, layer_scale_init_value=0.01, drop_path_rate=0.0, dropout_rate=0.0, **kwargs): super().__init__(**kwargs) self.image_size = image_size self.num_channels = num_channels self.patch_sizes = patch_sizes self.strides = strides self.hidden_sizes = hidden_sizes self.depths = depths self.mlp_ratios = mlp_ratios self.hidden_act = hidden_act self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.layer_scale_init_value = layer_scale_init_value self.drop_path_rate = drop_path_rate self.dropout_rate = dropout_rate # File: transformers-main/src/transformers/models/deprecated/van/convert_van_to_pytorch.py """""" import argparse import json import sys from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import torch import torch.nn as nn from huggingface_hub import cached_download, hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, VanConfig, VanForImageClassification from transformers.models.deprecated.van.modeling_van import VanLayerScaling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) @dataclass class Tracker: module: nn.Module traced: List[nn.Module] = field(default_factory=list) handles: list = field(default_factory=list) def _forward_hook(self, m, inputs: Tensor, outputs: Tensor): has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d) if has_not_submodules: if not isinstance(m, VanLayerScaling): self.traced.append(m) def __call__(self, x: Tensor): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook)) self.module(x) [x.remove() for x in self.handles] return self @property def parametrized(self): return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced)) @dataclass class ModuleTransfer: src: nn.Module dest: nn.Module verbose: int = 0 src_skip: List = field(default_factory=list) dest_skip: List = field(default_factory=list) def __call__(self, x: Tensor): dest_traced = Tracker(self.dest)(x).parametrized src_traced = 
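# --- Illustrative sketch (not part of the original file) ---------------------
# The Tracker/ModuleTransfer utilities above (continued below) copy weights by
# running both models on the same input and pairing parametrized leaf modules
# in execution order. A tiny standalone demo of the same forward-hook idea:
import torch
from torch import nn

traced = []
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
handles = [m.register_forward_hook(lambda mod, inp, out: traced.append(mod))
           for m in model.modules() if len(list(m.children())) == 0]
model(torch.randn(1, 3, 16, 16))
[h.remove() for h in handles]

parametrized = [m for m in traced if len(m.state_dict()) > 0]
# parametrized == [Conv2d, BatchNorm2d]; ReLU carries no parameters and drops out.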
Tracker(self.src)(x).parametrized src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced)) dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced)) if len(dest_traced) != len(src_traced): raise Exception(f'Numbers of operations are different. Source module has {len(src_traced)} operations while destination module has {len(dest_traced)}.') for (dest_m, src_m) in zip(dest_traced, src_traced): dest_m.load_state_dict(src_m.state_dict()) if self.verbose == 1: print(f'Transfered from={src_m} to={dest_m}') def copy_parameters(from_model: nn.Module, our_model: nn.Module) -> nn.Module: from_state_dict = from_model.state_dict() our_state_dict = our_model.state_dict() config = our_model.config all_keys = [] for stage_idx in range(len(config.hidden_sizes)): for block_id in range(config.depths[stage_idx]): from_key = f'block{stage_idx + 1}.{block_id}.layer_scale_1' to_key = f'van.encoder.stages.{stage_idx}.layers.{block_id}.attention_scaling.weight' all_keys.append((from_key, to_key)) from_key = f'block{stage_idx + 1}.{block_id}.layer_scale_2' to_key = f'van.encoder.stages.{stage_idx}.layers.{block_id}.mlp_scaling.weight' all_keys.append((from_key, to_key)) for (from_key, to_key) in all_keys: our_state_dict[to_key] = from_state_dict.pop(from_key) our_model.load_state_dict(our_state_dict) return our_model def convert_weight_and_push(name: str, config: VanConfig, checkpoint: str, from_model: nn.Module, save_directory: Path, push_to_hub: bool=True): print(f'Downloading weights for {name}...') checkpoint_path = cached_download(checkpoint) print(f'Converting {name}...') from_state_dict = torch.load(checkpoint_path)['state_dict'] from_model.load_state_dict(from_state_dict) from_model.eval() with torch.no_grad(): our_model = VanForImageClassification(config).eval() module_transfer = ModuleTransfer(src=from_model, dest=our_model) x = torch.randn((1, 3, 224, 224)) module_transfer(x) our_model = copy_parameters(from_model, our_model) if not torch.allclose(from_model(x), our_model(x).logits): raise ValueError("The model logits don't match the original one.") checkpoint_name = name print(checkpoint_name) if push_to_hub: our_model.push_to_hub(repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True) image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k') image_processor.push_to_hub(repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True) print(f'Pushed {checkpoint_name}') def convert_weights_and_push(save_directory: Path, model_name: str=None, push_to_hub: bool=True): filename = 'imagenet-1k-id2label.json' num_labels = 1000 repo_id = 'huggingface/label-files' num_labels = num_labels id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} id2label = id2label label2id = {v: k for (k, v) in id2label.items()} ImageNetPreTrainedConfig = partial(VanConfig, num_labels=num_labels, id2label=id2label, label2id=label2id) names_to_config = {'van-tiny': ImageNetPreTrainedConfig(hidden_sizes=[32, 64, 160, 256], depths=[3, 3, 5, 2], mlp_ratios=[8, 8, 4, 4]), 'van-small': ImageNetPreTrainedConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 4, 2], mlp_ratios=[8, 8, 4, 4]), 'van-base': ImageNetPreTrainedConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4]), 'van-large': ImageNetPreTrainedConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 5, 
27, 3], mlp_ratios=[8, 8, 4, 4])} names_to_original_models = {'van-tiny': van_tiny, 'van-small': van_small, 'van-base': van_base, 'van-large': van_large} names_to_original_checkpoints = {'van-tiny': 'https://huggingface.co/Visual-Attention-Network/VAN-Tiny-original/resolve/main/van_tiny_754.pth.tar', 'van-small': 'https://huggingface.co/Visual-Attention-Network/VAN-Small-original/resolve/main/van_small_811.pth.tar', 'van-base': 'https://huggingface.co/Visual-Attention-Network/VAN-Base-original/resolve/main/van_base_828.pth.tar', 'van-large': 'https://huggingface.co/Visual-Attention-Network/VAN-Large-original/resolve/main/van_large_839.pth.tar'} if model_name: convert_weight_and_push(model_name, names_to_config[model_name], checkpoint=names_to_original_checkpoints[model_name], from_model=names_to_original_models[model_name](), save_directory=save_directory, push_to_hub=push_to_hub) else: for (model_name, config) in names_to_config.items(): convert_weight_and_push(model_name, config, checkpoint=names_to_original_checkpoints[model_name], from_model=names_to_original_models[model_name](), save_directory=save_directory, push_to_hub=push_to_hub) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model-name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported resnet* architecture, currently: van-tiny/small/base/large. If `None`, all of them will the converted.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=Path, required=True, help='Path to the output PyTorch model directory.') parser.add_argument('--van_dir', required=True, type=Path, help="A path to VAN's original implementation directory. You can download from here: https://github.com/Visual-Attention-Network/VAN-Classification") parser.add_argument('--push_to_hub', default=True, type=bool, required=False, help='If True, push model and image processor to the hub.') args = parser.parse_args() pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) van_dir = args.van_dir sys.path.append(str(van_dir.parent)) from van.models.van import van_base, van_large, van_small, van_tiny convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) # File: transformers-main/src/transformers/models/deprecated/van/modeling_van.py """""" import math from collections import OrderedDict from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....activations import ACT2FN from ....modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ....modeling_utils import PreTrainedModel from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_van import VanConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'VanConfig' _CHECKPOINT_FOR_DOC = 'Visual-Attention-Network/van-base' _EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7] _IMAGE_CLASS_CHECKPOINT = 'Visual-Attention-Network/van-base' _IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat' def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor: if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) random_tensor = keep_prob + 
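# --- Illustrative sketch (not part of the original file) ---------------------
# The `drop_path` helper around this point implements stochastic depth: during
# training each sample's residual branch is zeroed with probability drop_prob
# and survivors are rescaled by 1/keep_prob so the expected value is unchanged.
import torch

torch.manual_seed(0)
x = torch.ones(4, 3)                        # 4 samples in the batch
drop_prob = 0.5
keep_prob = 1 - drop_prob

random_tensor = keep_prob + torch.rand((x.shape[0],) + (1,) * (x.ndim - 1))
random_tensor.floor_()                      # 0 or 1 per sample
out = x.div(keep_prob) * random_tensor      # dropped rows are all zero, kept rows are 2.0
# In expectation out equals x, which is why eval mode simply returns the input.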
torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() output = input.div(keep_prob) * random_tensor return output class VanDropPath(nn.Module): def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class VanOverlappingPatchEmbedder(nn.Module): def __init__(self, in_channels: int, hidden_size: int, patch_size: int=7, stride: int=4): super().__init__() self.convolution = nn.Conv2d(in_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=patch_size // 2) self.normalization = nn.BatchNorm2d(hidden_size) def forward(self, input: torch.Tensor) -> torch.Tensor: hidden_state = self.convolution(input) hidden_state = self.normalization(hidden_state) return hidden_state class VanMlpLayer(nn.Module): def __init__(self, in_channels: int, hidden_size: int, out_channels: int, hidden_act: str='gelu', dropout_rate: float=0.5): super().__init__() self.in_dense = nn.Conv2d(in_channels, hidden_size, kernel_size=1) self.depth_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=3, padding=1, groups=hidden_size) self.activation = ACT2FN[hidden_act] self.dropout1 = nn.Dropout(dropout_rate) self.out_dense = nn.Conv2d(hidden_size, out_channels, kernel_size=1) self.dropout2 = nn.Dropout(dropout_rate) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: hidden_state = self.in_dense(hidden_state) hidden_state = self.depth_wise(hidden_state) hidden_state = self.activation(hidden_state) hidden_state = self.dropout1(hidden_state) hidden_state = self.out_dense(hidden_state) hidden_state = self.dropout2(hidden_state) return hidden_state class VanLargeKernelAttention(nn.Module): def __init__(self, hidden_size: int): super().__init__() self.depth_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=5, padding=2, groups=hidden_size) self.depth_wise_dilated = nn.Conv2d(hidden_size, hidden_size, kernel_size=7, dilation=3, padding=9, groups=hidden_size) self.point_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=1) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: hidden_state = self.depth_wise(hidden_state) hidden_state = self.depth_wise_dilated(hidden_state) hidden_state = self.point_wise(hidden_state) return hidden_state class VanLargeKernelAttentionLayer(nn.Module): def __init__(self, hidden_size: int): super().__init__() self.attention = VanLargeKernelAttention(hidden_size) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: attention = self.attention(hidden_state) attended = hidden_state * attention return attended class VanSpatialAttentionLayer(nn.Module): def __init__(self, hidden_size: int, hidden_act: str='gelu'): super().__init__() self.pre_projection = nn.Sequential(OrderedDict([('conv', nn.Conv2d(hidden_size, hidden_size, kernel_size=1)), ('act', ACT2FN[hidden_act])])) self.attention_layer = VanLargeKernelAttentionLayer(hidden_size) self.post_projection = nn.Conv2d(hidden_size, hidden_size, kernel_size=1) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: residual = hidden_state hidden_state = self.pre_projection(hidden_state) hidden_state = self.attention_layer(hidden_state) hidden_state = self.post_projection(hidden_state) hidden_state = hidden_state + residual return hidden_state class VanLayerScaling(nn.Module): def __init__(self, hidden_size: int, initial_value: 
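# --- Illustrative sketch (not part of the original file) ---------------------
# VanLargeKernelAttention above factorizes a large spatial kernel into a 5x5
# depth-wise conv, a 7x7 depth-wise conv with dilation 3, and a 1x1 conv; the
# paddings (2 and 9) keep the spatial size unchanged and the output is used as
# a multiplicative attention map over the input:
import torch
from torch import nn

hidden = 32
lka = nn.Sequential(
    nn.Conv2d(hidden, hidden, kernel_size=5, padding=2, groups=hidden),
    nn.Conv2d(hidden, hidden, kernel_size=7, dilation=3, padding=9, groups=hidden),
    nn.Conv2d(hidden, hidden, kernel_size=1),
)
x = torch.randn(1, hidden, 14, 14)
attention = lka(x)
assert attention.shape == x.shape           # same spatial resolution
attended = x * attention                    # element-wise gating, as in the layer above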
float=0.01): super().__init__() self.weight = nn.Parameter(initial_value * torch.ones(hidden_size), requires_grad=True) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: hidden_state = self.weight.unsqueeze(-1).unsqueeze(-1) * hidden_state return hidden_state class VanLayer(nn.Module): def __init__(self, config: VanConfig, hidden_size: int, mlp_ratio: int=4, drop_path_rate: float=0.5): super().__init__() self.drop_path = VanDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.pre_normomalization = nn.BatchNorm2d(hidden_size) self.attention = VanSpatialAttentionLayer(hidden_size, config.hidden_act) self.attention_scaling = VanLayerScaling(hidden_size, config.layer_scale_init_value) self.post_normalization = nn.BatchNorm2d(hidden_size) self.mlp = VanMlpLayer(hidden_size, hidden_size * mlp_ratio, hidden_size, config.hidden_act, config.dropout_rate) self.mlp_scaling = VanLayerScaling(hidden_size, config.layer_scale_init_value) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: residual = hidden_state hidden_state = self.pre_normomalization(hidden_state) hidden_state = self.attention(hidden_state) hidden_state = self.attention_scaling(hidden_state) hidden_state = self.drop_path(hidden_state) hidden_state = residual + hidden_state residual = hidden_state hidden_state = self.post_normalization(hidden_state) hidden_state = self.mlp(hidden_state) hidden_state = self.mlp_scaling(hidden_state) hidden_state = self.drop_path(hidden_state) hidden_state = residual + hidden_state return hidden_state class VanStage(nn.Module): def __init__(self, config: VanConfig, in_channels: int, hidden_size: int, patch_size: int, stride: int, depth: int, mlp_ratio: int=4, drop_path_rate: float=0.0): super().__init__() self.embeddings = VanOverlappingPatchEmbedder(in_channels, hidden_size, patch_size, stride) self.layers = nn.Sequential(*[VanLayer(config, hidden_size, mlp_ratio=mlp_ratio, drop_path_rate=drop_path_rate) for _ in range(depth)]) self.normalization = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: hidden_state = self.embeddings(hidden_state) hidden_state = self.layers(hidden_state) (batch_size, hidden_size, height, width) = hidden_state.shape hidden_state = hidden_state.flatten(2).transpose(1, 2) hidden_state = self.normalization(hidden_state) hidden_state = hidden_state.view(batch_size, height, width, hidden_size).permute(0, 3, 1, 2) return hidden_state class VanEncoder(nn.Module): def __init__(self, config: VanConfig): super().__init__() self.stages = nn.ModuleList([]) patch_sizes = config.patch_sizes strides = config.strides hidden_sizes = config.hidden_sizes depths = config.depths mlp_ratios = config.mlp_ratios drop_path_rates = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] for (num_stage, (patch_size, stride, hidden_size, depth, mlp_expantion, drop_path_rate)) in enumerate(zip(patch_sizes, strides, hidden_sizes, depths, mlp_ratios, drop_path_rates)): is_first_stage = num_stage == 0 in_channels = hidden_sizes[num_stage - 1] if is_first_stage: in_channels = config.num_channels self.stages.append(VanStage(config, in_channels, hidden_size, patch_size=patch_size, stride=stride, depth=depth, mlp_ratio=mlp_expantion, drop_path_rate=drop_path_rate)) def forward(self, hidden_state: torch.Tensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple, BaseModelOutputWithNoAttention]: all_hidden_states = () if output_hidden_states 
else None for (_, stage_module) in enumerate(self.stages): hidden_state = stage_module(hidden_state) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) if not return_dict: return tuple((v for v in [hidden_state, all_hidden_states] if v is not None)) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=all_hidden_states) class VanPreTrainedModel(PreTrainedModel): config_class = VanConfig base_model_prefix = 'van' main_input_name = 'pixel_values' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): nn.init.trunc_normal_(module.weight, std=self.config.initializer_range) if isinstance(module, nn.Linear) and module.bias is not None: nn.init.constant_(module.bias, 0) elif isinstance(module, nn.LayerNorm): nn.init.constant_(module.bias, 0) nn.init.constant_(module.weight, 1.0) elif isinstance(module, nn.Conv2d): fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if module.bias is not None: module.bias.data.zero_() VAN_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`VanConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' VAN_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all stages. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare VAN model outputting raw features without any specific head on top. 
Note, VAN does not have an embedding layer.', VAN_START_DOCSTRING) class VanModel(VanPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.encoder = VanEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps) self.post_init() @add_start_docstrings_to_model_forward(VAN_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: Optional[torch.FloatTensor], output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state.mean(dim=[-2, -1]) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states) @add_start_docstrings('\n VAN Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ', VAN_START_DOCSTRING) class VanForImageClassification(VanPreTrainedModel): def __init__(self, config): super().__init__(config) self.van = VanModel(config) self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @add_start_docstrings_to_model_forward(VAN_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.van(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = 'regression' elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return 
(loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) # File: transformers-main/src/transformers/models/deprecated/vit_hybrid/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_vit_hybrid': ['ViTHybridConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_vit_hybrid'] = ['ViTHybridForImageClassification', 'ViTHybridModel', 'ViTHybridPreTrainedModel'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_vit_hybrid'] = ['ViTHybridImageProcessor'] if TYPE_CHECKING: from .configuration_vit_hybrid import ViTHybridConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_hybrid import ViTHybridForImageClassification, ViTHybridModel, ViTHybridPreTrainedModel try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vit_hybrid import ViTHybridImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/vit_hybrid/configuration_vit_hybrid.py """""" from ....configuration_utils import PretrainedConfig from ....utils import logging from ...auto.configuration_auto import CONFIG_MAPPING from ...bit import BitConfig logger = logging.get_logger(__name__) class ViTHybridConfig(PretrainedConfig): model_type = 'vit-hybrid' def __init__(self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=1, num_channels=3, backbone_featmap_shape=[1, 1024, 24, 24], qkv_bias=True, **kwargs): super().__init__(**kwargs) if use_pretrained_backbone: raise ValueError('Pretrained backbones are not supported yet.') if backbone_config is not None and backbone is not None: raise ValueError("You can't specify both `backbone` and `backbone_config`.") if backbone_config is None and backbone is None: logger.info('`backbone_config` is `None`. Initializing the config with a `BiT` backbone.') backbone_config = {'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage3'], 'embedding_dynamic_padding': True} if backbone_kwargs is not None and backbone_kwargs and (backbone_config is not None): raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") if isinstance(backbone_config, dict): if 'model_type' in backbone_config: backbone_config_class = CONFIG_MAPPING[backbone_config['model_type']] else: logger.info('`model_type` is not found in `backbone_config`. 
Use `Bit` as the backbone configuration class.') backbone_config_class = BitConfig backbone_config = backbone_config_class(**backbone_config) self.backbone_featmap_shape = backbone_featmap_shape self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias # File: transformers-main/src/transformers/models/deprecated/vit_hybrid/convert_vit_hybrid_timm_to_pytorch.py """""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def create_rename_keys(config, base_model=False): rename_keys = [] rename_keys.append(('cls_token', 'vit.embeddings.cls_token')) rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings')) rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight')) rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias')) rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight')) rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight')) rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias')) for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight')) 
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight')) rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias')) for i in range(config.num_hidden_layers): rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight')) rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias')) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight')) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias')) rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight')) rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias')) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight')) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias')) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight')) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias')) if base_model: rename_keys.extend([('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias')]) rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys] else: rename_keys.extend([('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias')]) return rename_keys def read_in_q_k_v(state_dict, config, base_model=False): for i in range(config.num_hidden_layers): if base_model: prefix = '' else: prefix = 'vit.' 
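# --- Editor's note: illustrative sketch, not part of the original conversion script. ---
# The statements that follow in `read_in_q_k_v` slice timm's fused `qkv` projection into the
# separate query/key/value weights and biases expected by the HF attention modules. A minimal,
# self-contained version of that slicing is sketched below; every `_example_*` name and the
# hidden size of 4 are made up for illustration only.
import torch as _torch_example
_example_hidden = 4
_example_qkv_weight = _torch_example.randn(3 * _example_hidden, _example_hidden)  # rows stacked as [q; k; v]
_example_q = _example_qkv_weight[:_example_hidden, :]
_example_k = _example_qkv_weight[_example_hidden:2 * _example_hidden, :]
_example_v = _example_qkv_weight[-_example_hidden:, :]
# Re-concatenating the slices recovers the original fused matrix.
assert _torch_example.equal(_torch_example.cat([_example_q, _example_k, _example_v], dim=0), _example_qkv_weight)
# --- End of editor's note. ---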
in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight') in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias') state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[:config.hidden_size, :] state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[:config.hidden_size] state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size:config.hidden_size * 2, :] state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[config.hidden_size:config.hidden_size * 2] state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size:, :] state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size:] def remove_classification_head_(state_dict): ignore_keys = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(k, None) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False): backbone_config = BitConfig(global_padding='same', layer_type='bottleneck', depths=(3, 4, 9), out_features=['stage3'], embedding_dynamic_padding=True) config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000) base_model = False timm_model = timm.create_model(vit_name, pretrained=True) timm_model.eval() state_dict = timm_model.state_dict() if base_model: remove_classification_head_(state_dict) rename_keys = create_rename_keys(config, base_model) for (src, dest) in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, base_model) repo_id = 'huggingface/label-files' filename = 'imagenet-1k-id2label.json' id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} config.id2label = id2label config.label2id = {v: k for (k, v) in id2label.items()} if vit_name[-5:] == 'in21k': model = ViTHybridModel(config).eval() else: model = ViTHybridForImageClassification(config).eval() model.load_state_dict(state_dict) transform = create_transform(**resolve_data_config({}, model=timm_model)) timm_transforms = transform.transforms pillow_resamplings = {'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST} processor = ViTHybridImageProcessor(do_resize=True, size={'shortest_edge': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist()) image = prepare_img() timm_pixel_values = transform(image).unsqueeze(0) pixel_values = processor(image, return_tensors='pt').pixel_values assert torch.allclose(timm_pixel_values, pixel_values) with torch.no_grad(): outputs = model(pixel_values) logits = outputs.logits print('Predicted class:', logits.argmax(-1).item()) if base_model: timm_pooled_output = timm_model.forward_features(pixel_values) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=0.001) else: timm_logits = 
timm_model(pixel_values) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(timm_logits, outputs.logits, atol=0.001) print('Looks ok!') if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model {vit_name} to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) print(f'Saving processor to {pytorch_dump_folder_path}') processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f'Pushing model and processor to the hub {vit_name}') model.push_to_hub(f'ybelkada/{vit_name}') processor.push_to_hub(f'ybelkada/{vit_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--vit_name', default='vit_base_r50_s16_384', type=str, help="Name of the hybrid ViT timm model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.') args = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/deprecated/vit_hybrid/image_processing_vit_hybrid.py """""" from typing import Dict, List, Optional, Union import numpy as np from ....image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ....image_transforms import convert_to_rgb, get_resize_output_image_size, resize, to_channel_dimension_format from ....image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments from ....utils import TensorType, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): import PIL class ViTHybridImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Dict[str, int]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224} crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size') self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb self._valid_processor_keys = ['images', 'do_resize', 'size', 'resample', 'do_center_crop', 'crop_size', 'do_rescale', 'rescale_factor', 'do_normalize', 'image_mean', 'image_std', 'do_convert_rgb', 'return_tensors', 'data_format', 'input_data_format'] def resize(self, image: np.ndarray, size: Dict[str, int], resample: 
PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: default_to_square = True if 'shortest_edge' in size: size = size['shortest_edge'] default_to_square = False elif 'height' in size and 'width' in size: size = (size['height'], size['width']) else: raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: int=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, param_name='size', default_to_square=False) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_list_of_images(images) validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) all_images = [] for image in images: if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) all_images.append(image) images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors) # File: transformers-main/src/transformers/models/deprecated/vit_hybrid/modeling_vit_hybrid.py """""" import collections.abc import math from typing import Dict, List, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ....activations import ACT2FN from ....modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ....modeling_utils import PreTrainedModel from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, torch_int from ....utils.backbone_utils import load_backbone from .configuration_vit_hybrid import ViTHybridConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'ViTHybridConfig' _CHECKPOINT_FOR_DOC = 'google/vit-hybrid-base-bit-384' _EXPECTED_OUTPUT_SHAPE = [1, 197, 768] _IMAGE_CLASS_CHECKPOINT = 'google/vit-hybrid-base-bit-384' _IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat' class ViTHybridEmbeddings(nn.Module): def __init__(self, config: ViTHybridConfig, use_mask_token: bool=False) -> None: super().__init__() self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size)) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None self.patch_embeddings = ViTHybridPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.patch_size = config.patch_size self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, 
dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> torch.Tensor: (batch_size, num_channels, height, width) = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) if bool_masked_pos is not None: seq_length = embeddings.shape[1] mask_tokens = self.mask_token.expand(batch_size, seq_length, -1) mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings class ViTHybridPatchEmbeddings(nn.Module): def __init__(self, config, feature_size=None): super().__init__() (image_size, patch_size) = (config.image_size, config.patch_size) (num_channels, hidden_size) = (config.num_channels, config.hidden_size) image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) self.backbone = load_backbone(config) if self.backbone.config.model_type != 'bit': raise ValueError(f'Backbone model type {self.backbone.model_type} is not supported.') feature_dim = self.backbone.channels[-1] if feature_size is None: feature_map = config.backbone_featmap_shape feature_size = feature_map[-2:] feature_dim = feature_map[1] else: feature_size = feature_size if isinstance(feature_size, collections.abc.Iterable) else (feature_size, feature_size) feature_dim = self.backbone.channels[-1] self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.projection = nn.Conv2d(feature_dim, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: (_, num_channels, height, width) = pixel_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).") features = self.backbone(pixel_values).feature_maps[-1] embeddings = self.projection(features).flatten(2).transpose(1, 2) return embeddings class ViTHybridSelfAttention(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size {(config.hidden_size,)} is not a multiple of the number of attention heads {config.num_attention_heads}.') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, 
self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class ViTHybridSdpaSelfAttention(ViTHybridSelfAttention): def __init__(self, config: ViTHybridConfig) -> None: super().__init__(config) self.attention_probs_dropout_prob = config.attention_probs_dropout_prob def forward(self, hidden_states, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, head_mask, self.attention_probs_dropout_prob if self.training else 0.0, is_causal=False, scale=None) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) return (context_layer, None) class ViTHybridSelfOutput(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class ViTHybridAttention(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.attention = ViTHybridSelfAttention(config) self.output = ViTHybridSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, 
index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class ViTHybridSdpaAttention(ViTHybridAttention): def __init__(self, config: ViTHybridConfig) -> None: super().__init__(config) self.attention = ViTHybridSdpaSelfAttention(config) class ViTHybridIntermediate(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class ViTHybridOutput(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states VIT_HYBRID_ATTENTION_CLASSES = {'eager': ViTHybridAttention, 'sdpa': ViTHybridSdpaAttention} class ViTHybridLayer(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = VIT_HYBRID_ATTENTION_CLASSES[config._attn_implementation](config) self.intermediate = ViTHybridIntermediate(config) self.output = ViTHybridOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention(self.layernorm_before(hidden_states), head_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] hidden_states = attention_output + hidden_states.to(attention_output.device) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs class ViTHybridEncoder(nn.Module): def __init__(self, config: ViTHybridConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([ViTHybridLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False 
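# --- Editor's note: illustrative sketch, not part of the original modeling file. ---
# `ViTHybridSdpaSelfAttention` defined earlier in this file swaps the eager
# softmax(Q K^T / sqrt(head_dim)) V computation for `torch.nn.functional.scaled_dot_product_attention`.
# A minimal numerical check that the two formulations agree; the shapes and `_example_*` names
# below are made up for illustration only.
import math as _math_example
import torch as _torch_example
_example_q = _torch_example.randn(1, 2, 5, 8)  # (batch, heads, seq_len, head_dim)
_example_k = _torch_example.randn(1, 2, 5, 8)
_example_v = _torch_example.randn(1, 2, 5, 8)
_example_eager = _torch_example.softmax(_example_q @ _example_k.transpose(-1, -2) / _math_example.sqrt(8), dim=-1) @ _example_v
_example_sdpa = _torch_example.nn.functional.scaled_dot_product_attention(_example_q, _example_k, _example_v)
assert _torch_example.allclose(_example_eager, _example_sdpa, atol=1e-5)
# --- End of editor's note. ---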
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, layer_head_mask, output_attentions) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class ViTHybridPreTrainedModel(PreTrainedModel): config_class = ViTHybridConfig base_model_prefix = 'vit' main_input_name = 'pixel_values' supports_gradient_checkpointing = True _no_split_modules = ['ViTHybridEmbeddings', 'ViTHybridLayer'] _supports_sdpa = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, ViTHybridEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_(module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.position_embeddings.dtype) module.cls_token.data = nn.init.trunc_normal_(module.cls_token.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.cls_token.dtype) VIT_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ViTHybridConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' VIT_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ViTHybridImageProcessor.__call__`] for details.\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare ViT Hybrid Model transformer outputting raw hidden-states without any specific head on top.', VIT_START_DOCSTRING) class ViTHybridModel(ViTHybridPreTrainedModel): def __init__(self, config: ViTHybridConfig, add_pooling_layer: bool=True, use_mask_token: bool=False): super().__init__(config) self.config = config self.embeddings = ViTHybridEmbeddings(config, use_mask_token=use_mask_token) self.encoder = ViTHybridEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = ViTHybridPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self) -> ViTHybridPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype if pixel_values.dtype != expected_dtype: pixel_values = pixel_values.to(expected_dtype) embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding) encoder_outputs = self.encoder(embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) return head_outputs + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class ViTHybridPooler(nn.Module): def __init__(self, config: ViTHybridConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): first_token_tensor = hidden_states[:, 
0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output @add_start_docstrings('\n ViT Hybrid Model transformer with an image classification head on top (a linear layer on top of the final hidden\n state of the [CLS] token) e.g. for ImageNet.\n ', VIT_START_DOCSTRING) class ViTHybridForImageClassification(ViTHybridPreTrainedModel): def __init__(self, config: ViTHybridConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.vit = ViTHybridModel(config, add_pooling_layer=False) self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.vit(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output[:, 0, :]) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/deprecated/xlm_prophetnet/__init__.py from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available _import_structure = {'configuration_xlm_prophetnet': ['XLMProphetNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_xlm_prophetnet'] = ['XLMProphetNetTokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_xlm_prophetnet'] = ['XLMProphetNetDecoder', 'XLMProphetNetEncoder', 'XLMProphetNetForCausalLM', 'XLMProphetNetForConditionalGeneration', 
'XLMProphetNetModel', 'XLMProphetNetPreTrainedModel'] if TYPE_CHECKING: from .configuration_xlm_prophetnet import XLMProphetNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_prophetnet import XLMProphetNetTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_prophetnet import XLMProphetNetDecoder, XLMProphetNetEncoder, XLMProphetNetForCausalLM, XLMProphetNetForConditionalGeneration, XLMProphetNetModel, XLMProphetNetPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/deprecated/xlm_prophetnet/configuration_xlm_prophetnet.py """""" from typing import Callable, Optional, Union from ....configuration_utils import PretrainedConfig from ....utils import logging logger = logging.get_logger(__name__) class XLMProphetNetConfig(PretrainedConfig): model_type = 'xlm-prophetnet' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'num_encoder_attention_heads'} def __init__(self, activation_dropout: Optional[float]=0.1, activation_function: Optional[Union[str, Callable]]='gelu', vocab_size: Optional[int]=30522, hidden_size: Optional[int]=1024, encoder_ffn_dim: Optional[int]=4096, num_encoder_layers: Optional[int]=12, num_encoder_attention_heads: Optional[int]=16, decoder_ffn_dim: Optional[int]=4096, num_decoder_layers: Optional[int]=12, num_decoder_attention_heads: Optional[int]=16, attention_dropout: Optional[float]=0.1, dropout: Optional[float]=0.1, max_position_embeddings: Optional[int]=512, init_std: Optional[float]=0.02, is_encoder_decoder: Optional[bool]=True, add_cross_attention: Optional[bool]=True, decoder_start_token_id: Optional[int]=0, ngram: Optional[int]=2, num_buckets: Optional[int]=32, relative_max_distance: Optional[int]=128, disable_ngram_loss: Optional[bool]=False, eps: Optional[float]=0.0, use_cache: Optional[bool]=True, pad_token_id: Optional[int]=0, bos_token_id: Optional[int]=1, eos_token_id: Optional[int]=2, **kwargs): self.vocab_size = vocab_size self.hidden_size = hidden_size self.encoder_ffn_dim = encoder_ffn_dim self.num_encoder_layers = num_encoder_layers self.num_encoder_attention_heads = num_encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.num_decoder_layers = num_decoder_layers self.num_decoder_attention_heads = num_decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std self.activation_function = activation_function self.ngram = ngram self.num_buckets = num_buckets self.relative_max_distance = relative_max_distance self.disable_ngram_loss = disable_ngram_loss self.eps = eps self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout self.use_cache = use_cache super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs) @property def num_hidden_layers(self) -> int: return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def num_hidden_layers(self, value): raise NotImplementedError('This model does not support the setting of `num_hidden_layers`. 
Please set `num_encoder_layers` and `num_decoder_layers`.') # File: transformers-main/src/transformers/models/deprecated/xlm_prophetnet/modeling_xlm_prophetnet.py """""" import copy import math import warnings from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import LayerNorm from ....activations import ACT2FN from ....modeling_outputs import BaseModelOutput from ....modeling_utils import PreTrainedModel from ....utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_xlm_prophetnet import XLMProphetNetConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'XLMProphetNetConfig' XLM_PROPHETNET_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n Original ProphetNet code can be found [here](https://github.com/microsoft/ProphetNet). Checkpoints were converted\n from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the\n file `convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py`.\n\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and\n behavior.\n\n Parameters:\n config ([`XLMProphetNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' XLM_PROPHETNET_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n XLMProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If\n `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. 
Causal mask will also\n be used by default.\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' def softmax(hidden_state, dim, onnx_trace=False): if onnx_trace: return nn.functional.softmax(hidden_state.float(), dim=dim) else: return nn.functional.softmax(hidden_state, dim=dim, dtype=torch.float32) def ngram_attention_bias(sequence_length, ngram, device, dtype): left_block = torch.ones((ngram, sequence_length, sequence_length), device=device, dtype=dtype) * torch.finfo(dtype).min right_block = left_block.detach().clone() for stream_idx in range(ngram): right_block[stream_idx].fill_diagonal_(0, wrap=False) left_block[stream_idx].triu_(-stream_idx + 1) left_block[:, :, 0] = 0 return torch.cat([left_block, right_block], dim=2) def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False): inv_relative_positions = -relative_positions rel_positions_bucket = 0 if is_bidirectional: num_buckets = num_buckets // 2 rel_positions_bucket = rel_positions_bucket + torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets inv_relative_positions = torch.abs(inv_relative_positions) else: inv_relative_positions = torch.max(inv_relative_positions, torch.zeros_like(inv_relative_positions)) max_exact = num_buckets // 2 is_small = torch.lt(inv_relative_positions, max_exact) val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int() rel_positions_bucket = rel_positions_bucket + torch.where(is_small, inv_relative_positions.int(), val_if_large) return rel_positions_bucket def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids): main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1) main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1) predicting_stream_relative_positions = torch.cat((position_ids - 1, position_ids), dim=-1).unsqueeze(1) predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, position_ids.size(-1), 1) predicting_stream_relative_positions = predicting_stream_relative_positions - position_ids.unsqueeze(-1) main_relative_position_buckets = compute_relative_buckets(num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False) predict_relative_position_buckets = compute_relative_buckets(num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False) return (main_relative_position_buckets, predict_relative_position_buckets) @dataclass class XLMProphetNetSeq2SeqLMOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None logits_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None 
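# The ngram entries in this output hold ProphetNet's extra "predict" streams: `logits` is the
# first predict stream (standard next-token prediction), `logits_ngram` contains the streams
# predicting tokens further ahead (populated only when `config.ngram > 1`), and
# `decoder_ngram_hidden_states` / `decoder_ngram_attentions` collect the per-layer hidden states
# and attention weights of those predict streams.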
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @property def decoder_cross_attentions(self): warnings.warn('`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions` instead.', FutureWarning) return self.cross_attentions @dataclass class XLMProphetNetSeq2SeqModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor last_hidden_state_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @property def decoder_cross_attentions(self): warnings.warn('`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions` instead.', FutureWarning) return self.cross_attentions @dataclass class XLMProphetNetDecoderModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor last_hidden_state_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class XLMProphetNetDecoderLMOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None logits_ngram: Optional[torch.FloatTensor] = None past_key_values: Optional[Tuple[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None class XLMProphetNetPreTrainedModel(PreTrainedModel): config_class = XLMProphetNetConfig base_model_prefix = 'prophetnet' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.init_std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, 'self.model.config.decoder_start_token_id has to be defined. In XLMProphetNet it is usually set to the pad_token_id. 
See XLMProphetNet docs for more information' shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, 'self.model.config.pad_token_id has to be defined.' shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), 'Verify that `shifted_input_ids` has only positive values' return shifted_input_ids class XLMProphetNetPositionalEmbeddings(nn.Embedding): def __init__(self, config: XLMProphetNetConfig) -> None: self.max_length = config.max_position_embeddings super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id) def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None): assert position_ids is None or self.padding_idx is None, 'If position_ids is pre-computed then padding_idx should not be set.' if position_ids is None: if past_key_values is not None: prev_num_input_ids = past_key_values[0][0].shape[2] num_input_ids = inputs_shape[1] + prev_num_input_ids position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * int(self.padding_idx + num_input_ids) else: if attention_mask is None: attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device) position_ids = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() + self.padding_idx position_ids = position_ids.clamp(0, self.max_length - 1) return (super().forward(position_ids), position_ids) def _forward(self, position_ids): return super().forward(position_ids) class XLMProphetNetAttention(nn.Module): def __init__(self, config: XLMProphetNetConfig, num_attn_heads: int): super().__init__() hidden_size = config.hidden_size self.attention_dropout = config.attention_dropout self.dropout = config.dropout self.num_attn_heads = num_attn_heads self.head_dim = hidden_size // num_attn_heads assert self.head_dim * num_attn_heads == hidden_size, '`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and `config.num_decoder_attention_heads`' self.key_proj = nn.Linear(hidden_size, hidden_size) self.value_proj = nn.Linear(hidden_size, hidden_size) self.query_proj = nn.Linear(hidden_size, hidden_size) self.out_proj = nn.Linear(hidden_size, hidden_size) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states, key_value_states: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, layer_head_mask: Optional[Tensor]=None, past_key_value: Optional[Tuple[Tensor]]=None, output_attentions: bool=False) -> Tuple[Tensor, Optional[Tensor]]: (batch_size, tgt_len, hidden_size) = hidden_states.size() is_cross_attention = key_value_states is not None assert list(hidden_states.size()) == [batch_size, tgt_len, hidden_size], f'Size of hidden states should be {(batch_size, tgt_len, hidden_size)}, but is {hidden_states.size()}' query_states = self.query_proj(hidden_states) / self.head_dim ** 0.5 if is_cross_attention and past_key_value is not None: key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.key_proj(key_value_states), -1, batch_size) value_states = self._shape(self.value_proj(key_value_states), -1, batch_size) else: key_states = self._shape(self.key_proj(hidden_states), -1, batch_size) value_states = 
self._shape(self.value_proj(hidden_states), -1, batch_size) if is_cross_attention: past_key_value = (key_states, value_states) proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(2) attn_weights = torch.einsum('bsij,bsjk->bsik', query_states, key_states.transpose(2, 3)) expected_shape = (batch_size, self.num_attn_heads, tgt_len, src_len) if attn_weights.size() != expected_shape: raise ValueError(f'Attention weights should have size {expected_shape}, but is {attn_weights.size()}') if attention_mask is not None and attention_mask.dim() == 0: attention_mask = None expected_shape = (batch_size, self.num_attn_heads, 1, src_len) if attention_mask is not None and attention_mask.size() != expected_shape: raise ValueError(f'Attention mask should have size {expected_shape}, but is {attention_mask.size()}') if attention_mask is not None: attn_weights = attn_weights + attention_mask if output_attentions: attn_weights_reshaped = attn_weights else: attn_weights_reshaped = None attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,), f'Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is {layer_head_mask.size()}' attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(batch_size, self.num_attn_heads, tgt_len, src_len) attn_weights_reshaped = layer_head_mask.view(1, -1, 1, 1) * attn_weights_reshaped attn_probs = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.einsum('bsij,bsjk->bsik', attn_probs, value_states) expected_shape = (batch_size, self.num_attn_heads, tgt_len, self.head_dim) if attn_output.size() != expected_shape: raise ValueError(f'`attn_output` should have shape {expected_shape}, but is of shape {attn_output.size()}') attn_output = attn_output.transpose(1, 2).reshape(batch_size, tgt_len, hidden_size) attn_output = self.out_proj(attn_output) attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training) return (attn_output, attn_weights_reshaped, past_key_value) class XLMProphetNetFeedForward(nn.Module): def __init__(self, config: XLMProphetNetConfig, ffn_dim: int): super().__init__() self.activation_fn = ACT2FN[config.activation_function] self.intermediate = nn.Linear(config.hidden_size, ffn_dim) self.output = nn.Linear(ffn_dim, config.hidden_size) self.activation_dropout = config.activation_dropout self.dropout = config.dropout def forward(self, hidden_states): hidden_states = self.intermediate(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.output(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class XLMProphetNetNgramSelfAttention(nn.Module): def __init__(self, config: XLMProphetNetConfig): super().__init__() self.hidden_size = config.hidden_size self.num_buckets = config.num_buckets self.relative_max_distance = config.relative_max_distance self.num_attn_heads = config.num_decoder_attention_heads self.dropout = config.dropout self.attention_dropout = config.attention_dropout self.head_dim = config.hidden_size // self.num_attn_heads self.ngram = config.ngram 
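# N-stream self-attention: the incoming hidden states are the main stream and `ngram` predict
# streams concatenated along the sequence dimension. The forward pass chunks them into 1 + ngram
# pieces, lets the main stream attend causally to itself, and lets every predict stream attend to
# the main-stream keys/values concatenated with its own. Learned relative-position biases come
# from `relative_pos_embeddings` (`num_buckets` values per attention head) and are added to both
# the main and the predict attention scores before the softmax.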
assert self.head_dim * self.num_attn_heads == config.hidden_size, 'config.hidden_size must be divisible by num_attn_heads' self.key_proj = nn.Linear(config.hidden_size, config.hidden_size) self.value_proj = nn.Linear(config.hidden_size, config.hidden_size) self.query_proj = nn.Linear(config.hidden_size, config.hidden_size) self.out_proj = nn.Linear(config.hidden_size, config.hidden_size) self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads) self.onnx_trace = False def _shape(self, tensor, seq_len, batch_size): return tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous() def prepare_for_onnx_export_(self): self.onnx_trace = True def forward(self, hidden_states, past_key_value: Optional[Tuple[Tensor]]=None, attention_mask=None, layer_head_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None): (batch_size, ngram_sequence_length, hidden_size) = hidden_states.size() assert list(hidden_states.size()) == [batch_size, ngram_sequence_length, hidden_size], f'`hidden_states` should be of shape {(batch_size, ngram_sequence_length, hidden_size)}, but is of shape {hidden_states.shape}' query_states = self.query_proj(hidden_states) key_states = self.key_proj(hidden_states) value_states = self.value_proj(hidden_states) query_states = query_states / self.head_dim ** 0.5 query_states = self._shape(query_states, ngram_sequence_length, batch_size) key_states = self._shape(key_states, -1, batch_size) value_states = self._shape(value_states, -1, batch_size) proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim) query_states = query_states.view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=1) query_states_list = query_states.chunk(1 + self.ngram, dim=2) key_states_list = key_states.chunk(1 + self.ngram, dim=2) value_states_list = value_states.chunk(1 + self.ngram, dim=2) (main_hidden_states, hidden_states_predict_list) = (hidden_states_list[0], hidden_states_list[1:]) (main_query_states, predict_query_states_list) = (query_states_list[0], query_states_list[1:]) (main_key_states, predict_key_states_list) = (key_states_list[0], key_states_list[1:]) (main_value_states, predict_value_states_list) = (value_states_list[0], value_states_list[1:]) if past_key_value is not None: prev_main_key_states = past_key_value[0] main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=2) prev_main_value_states = past_key_value[1] main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=2) past_key_value = (main_key_states, main_value_states) sequence_length = ngram_sequence_length // (1 + self.ngram) main_attn_weights = torch.einsum('bntc,bncs->bnts', main_query_states, main_key_states.transpose(2, 3)) main_relative_pos_embeddings = self.get_main_relative_pos_embeddings(main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets) main_attn_weights = main_attn_weights + main_relative_pos_embeddings if attention_mask is not None: main_attn_weights = main_attn_weights + attention_mask main_attn_probs = softmax(main_attn_weights, dim=-1, onnx_trace=self.onnx_trace).type_as(main_attn_weights) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,), f'Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is 
{layer_head_mask.size()}' main_attn_probs = layer_head_mask.view(1, -1, 1, 1) * main_attn_probs.view(batch_size, self.num_attn_heads, -1, sequence_length) main_attn_probs = nn.functional.dropout(main_attn_probs, p=self.attention_dropout, training=self.training) main_attn_output = torch.einsum('bntc,bncs->bnts', main_attn_probs, main_value_states) main_attn_output = main_attn_output.transpose(1, 2).reshape(batch_size, 1, sequence_length, hidden_size) main_attn_output = self.out_proj(main_attn_output) predict_query_states = torch.stack(predict_query_states_list, 1).view(batch_size, self.ngram, self.num_attn_heads, sequence_length, self.head_dim) predict_key_states = torch.stack([torch.cat([main_key_states, key], 2) for key in predict_key_states_list], 1) predict_hidden_states = torch.stack(hidden_states_predict_list, dim=2) predict_value_states = torch.cat([torch.cat([main_value_states, v_p], 2).unsqueeze(2) for v_p in predict_value_states_list], 2) predict_attn_weights = torch.einsum('bnhtc,bnhsc->bnhts', (predict_query_states, predict_key_states)) predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings(predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets) predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings if extended_predict_attention_mask is not None: extended_predict_attention_mask = extended_predict_attention_mask.permute(0, 2, 1, 3, 4) extended_predict_attention_mask = extended_predict_attention_mask.to(predict_attn_weights.dtype) predict_attn_weights = predict_attn_weights + extended_predict_attention_mask predict_attn_probs = softmax(predict_attn_weights, dim=-1, onnx_trace=self.onnx_trace).type_as(predict_attn_weights) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_attn_heads,), f'Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is {layer_head_mask.size()}' predict_attn_probs = layer_head_mask.view(1, 1, -1, 1, 1) * predict_attn_probs predict_attn_probs = nn.functional.dropout(predict_attn_probs, p=self.attention_dropout, training=self.training) predict_attn_output = torch.einsum('bnhts,bnhsc->bnhtc', (predict_attn_probs, predict_value_states.transpose(1, 2))) predict_attn_output = predict_attn_output.transpose(2, 3) predict_attn_output = predict_attn_output.reshape(batch_size, self.ngram, sequence_length, hidden_size) predict_attn_output = self.out_proj(predict_attn_output) attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view(batch_size, -1, hidden_size) main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1) attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training) return (attn_output, main_attn_probs, predict_attn_probs, past_key_value) def get_main_relative_pos_embeddings(self, hidden_states, attn_weights, position_ids, main_relative_position_buckets): (batch_size, num_attn_heads, tgt_len, src_len) = attn_weights.shape attn_weights = attn_weights.view(batch_size, num_attn_heads, tgt_len, src_len) if main_relative_position_buckets is None: (batch_size, sequence_length) = hidden_states.shape[:2] relative_positions = torch.arange(1, attn_weights.shape[-1] + 1).unsqueeze(0).unsqueeze(0).repeat(batch_size, sequence_length, 1).to(position_ids.device) relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1) main_relative_position_buckets = compute_relative_buckets(self.num_buckets, self.relative_max_distance, 
relative_positions, False) rel_pos_embeddings = self.relative_pos_embeddings(hidden_states) rel_pos_embeddings = rel_pos_embeddings.view(rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads)) rel_pos_embeddings = rel_pos_embeddings.permute(0, 3, 1, 2) rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:3] + (-1,)) main_relative_position_buckets = main_relative_position_buckets.repeat(1, self.num_attn_heads, 1) main_relative_position_buckets = main_relative_position_buckets.view(-1, main_relative_position_buckets.shape[-1]) main_relative_position_buckets = main_relative_position_buckets.long() rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1)) main_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=main_relative_position_buckets) main_relative_pos_embeddings = main_relative_pos_embeddings.view(batch_size, num_attn_heads, tgt_len, -1) return main_relative_pos_embeddings def get_predict_relative_pos_embeddings(self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets): (batch_size, sequence_length) = hidden_states.shape[0:2] if predict_relative_position_buckets is None: key_sequence_length = attn_weights.shape[-1] assert position_ids[0][0] == key_sequence_length - 1, '`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... (key_sequence_length - 1)' relative_positions = torch.arange(0, key_sequence_length).unsqueeze(0).unsqueeze(0).repeat(batch_size, sequence_length, 1).to(position_ids.device) relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1) predict_relative_position_buckets = compute_relative_buckets(self.num_buckets, self.relative_max_distance, relative_positions, False) hidden_states = hidden_states.transpose(1, 2) rel_pos_embeddings = self.relative_pos_embeddings(hidden_states) rel_pos_embeddings = rel_pos_embeddings.view(hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads)) rel_pos_embeddings = rel_pos_embeddings.permute(0, 2, 1, 4, 3) rel_pos_embeddings = rel_pos_embeddings.reshape(-1, self.num_buckets) predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0) predict_relative_position_buckets = predict_relative_position_buckets.repeat(self.ngram, 1, self.num_attn_heads, 1) predict_relative_position_buckets = predict_relative_position_buckets.view(-1, predict_relative_position_buckets.size(-1)).long() predict_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=predict_relative_position_buckets) predict_relative_pos_embeddings = predict_relative_pos_embeddings.view(batch_size, self.ngram, self.num_attn_heads, sequence_length, -1) return predict_relative_pos_embeddings class XLMProphetNetEncoderLayer(nn.Module): def __init__(self, config: XLMProphetNetConfig): super().__init__() self.self_attn = XLMProphetNetAttention(config, config.num_encoder_attention_heads) self.self_attn_layer_norm = LayerNorm(config.hidden_size) self.feed_forward = XLMProphetNetFeedForward(config, config.encoder_ffn_dim) self.feed_forward_layer_norm = LayerNorm(config.hidden_size) def forward(self, hidden_states, attention_mask, layer_head_mask, output_attentions: bool=False): (attention_output, attn_weights, _) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = self.self_attn_layer_norm(attention_output + hidden_states) feed_forward_output = 
self.feed_forward(hidden_states) hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class XLMProphetNetDecoderLayer(nn.Module): def __init__(self, config: XLMProphetNetConfig): super().__init__() self.self_attn = XLMProphetNetNgramSelfAttention(config) self.self_attn_layer_norm = LayerNorm(config.hidden_size) if config.add_cross_attention: self.cross_attn = XLMProphetNetAttention(config, config.num_decoder_attention_heads) self.cross_attn_layer_norm = LayerNorm(config.hidden_size) self.feed_forward = XLMProphetNetFeedForward(config, config.decoder_ffn_dim) self.feed_forward_layer_norm = LayerNorm(config.hidden_size) def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attn_mask=None, layer_head_mask=None, cross_attn_layer_head_mask=None, extended_predict_attention_mask=None, main_relative_position_buckets=None, predict_relative_position_buckets=None, position_ids=None, past_key_value=None, use_cache: bool=True, output_attentions: bool=False): self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (ngram_attention_output, self_attn_weights, self_attn_weights_ngram, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, extended_predict_attention_mask=extended_predict_attention_mask, main_relative_position_buckets=main_relative_position_buckets, predict_relative_position_buckets=predict_relative_position_buckets, position_ids=position_ids) hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output) cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attn_weights = None if encoder_hidden_states is not None: (attention_output, cross_attn_weights, cross_attn_present_key_value) = self.cross_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attn_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions) hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states) present_key_value = present_key_value + cross_attn_present_key_value feed_forward_output = self.feed_forward(hidden_states) hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, self_attn_weights_ngram, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs @add_start_docstrings('The standalone encoder part of the XLMProphetNetModel.', XLM_PROPHETNET_START_DOCSTRING) class XLMProphetNetEncoder(XLMProphetNetPreTrainedModel): def __init__(self, config: XLMProphetNetConfig, word_embeddings: nn.Embedding=None): super().__init__(config) self.word_embeddings = word_embeddings if word_embeddings is not None else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = XLMProphetNetPositionalEmbeddings(config) self.embeddings_layer_norm = LayerNorm(config.hidden_size) self.layers = nn.ModuleList([XLMProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)]) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value 
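# A minimal usage sketch for the standalone encoder, kept entirely in comments so the class body
# is unchanged. The checkpoint name is only illustrative and the top-level import assumes the
# deprecated XLM-ProphetNet classes are still re-exported by `transformers`:
#
#     import torch
#     from transformers import AutoTokenizer, XLMProphetNetEncoder
#
#     tokenizer = AutoTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
#     model = XLMProphetNetEncoder.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
#     inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#     with torch.no_grad():
#         last_hidden_state = model(**inputs).last_hidden_state  # (batch_size, seq_len, hidden_size)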
@add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError('Either input_ids or inputs_embeds has to be passed.') elif input_ids is not None and inputs_embeds is not None: raise ValueError('Make sure to only pass input_ids or inputs_embeds.') elif input_ids is not None and inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if attention_mask is not None: extended_attention_mask = (1.0 - attention_mask[:, None, None, :].repeat(1, self.config.num_encoder_attention_heads, 1, 1)) * torch.finfo(self.dtype).min extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype) else: extended_attention_mask = None (position_embeddings, position_ids) = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device) hidden_states = inputs_embeds + position_embeddings hidden_states = self.embeddings_layer_norm(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.config.dropout, training=self.training) encoder_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: assert head_mask.size()[0] == len(self.layers), f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.' 
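# The loop below runs every encoder layer either through gradient checkpointing (when enabled and
# training) or directly. Hidden states are recorded before each layer, and once more after the
# last one, when `output_hidden_states` is set; per-layer attention weights are appended when
# `output_attentions` is set.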
for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_hidden_states = encoder_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, extended_attention_mask, head_mask[idx] if head_mask is not None else None, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask=extended_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_hidden_states = encoder_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions) @add_start_docstrings('The standalone decoder part of the XLMProphetNetModel.', XLM_PROPHETNET_START_DOCSTRING) class XLMProphetNetDecoder(XLMProphetNetPreTrainedModel): def __init__(self, config: XLMProphetNetConfig, word_embeddings: Optional[nn.Embedding]=None): super().__init__(config) self.ngram = config.ngram self.num_buckets = config.num_buckets self.relative_max_distance = config.relative_max_distance self.dropout = config.dropout self.max_target_positions = config.max_position_embeddings self.word_embeddings = word_embeddings if word_embeddings is not None else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = XLMProphetNetPositionalEmbeddings(config) self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None) self.layers = nn.ModuleList([XLMProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)]) self.embeddings_layer_norm = LayerNorm(config.hidden_size) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value @add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=XLMProphetNetDecoderModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, XLMProphetNetDecoderModelOutput]: use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and inputs_embeds is None: raise ValueError('Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.') elif input_ids is not None and inputs_embeds is not None: raise ValueError('Make 
sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.') elif input_ids is not None and inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) (batch_size, sequence_length) = inputs_embeds.shape[:2] (main_stream_pos_embed, position_ids) = self.position_embeddings((batch_size, sequence_length), device=inputs_embeds.device, past_key_values=past_key_values) if past_key_values is not None: (main_relative_position_buckets, predict_relative_position_buckets) = (None, None) else: (main_relative_position_buckets, predict_relative_position_buckets) = self.compute_buffered_relative_buckets(position_ids) predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1) hidden_states = inputs_embeds + main_stream_pos_embed ngram_embeddings = self.ngram_embeddings.weight if past_key_values is not None: assert hidden_states.size(1) == 1, 'At the moment `use_cache` is only supported for `decoder_input_ids` of length 1' ngram_hidden_states = [(ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).repeat(batch_size, 1, 1) for ngram in range(self.ngram)] extended_attention_mask = None extended_predict_attention_mask = None else: ngram_hidden_states = [ngram_embeddings[ngram - 1] + predicting_stream_pos_embed for ngram in range(self.ngram)] extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask) extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask) if encoder_attention_mask is not None: extended_encoder_attention_mask = (1.0 - encoder_attention_mask[:, None, None, :].repeat(1, self.config.num_decoder_attention_heads, 1, 1)) * torch.finfo(self.dtype).min extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype) else: extended_encoder_attention_mask = None hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 1) if self.embeddings_layer_norm: hidden_states = self.embeddings_layer_norm(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) all_main_stream_hidden_states = () if output_hidden_states else None all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None all_main_stream_attns = () if output_attentions else None all_ngram_stream_attns = () if output_attentions else None all_cross_attns = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False present_key_values = () if use_cache else None for (attn_mask, mask_name) in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: assert attn_mask.size()[0] == len(self.layers), f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.' 
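# At this point `hidden_states` contains the main stream followed by the ngram predict streams,
# concatenated along dim 1 to a length of (1 + ngram) * sequence_length. The loop below pushes
# that combined tensor through every decoder layer, collecting the present key/value cache when
# `use_cache` is set (layer output index 4 with attentions, index 1 without); the main and ngram
# parts are split back apart at `sequence_length` once all layers have run.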
for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_main_stream_hidden_states += (hidden_states[:, :sequence_length],) if self.config.ngram > 0: all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],) past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, extended_attention_mask, encoder_hidden_states, extended_encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, extended_predict_attention_mask, main_relative_position_buckets, predict_relative_position_buckets, position_ids, None, use_cache, output_attentions) else: layer_outputs = decoder_layer(hidden_states, attention_mask=extended_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attn_mask=extended_encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, extended_predict_attention_mask=extended_predict_attention_mask, main_relative_position_buckets=main_relative_position_buckets, predict_relative_position_buckets=predict_relative_position_buckets, position_ids=position_ids, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions) hidden_states = layer_outputs[0] if use_cache: present_key_values += (layer_outputs[4 if output_attentions else 1],) if output_attentions: all_main_stream_attns += (layer_outputs[1],) all_ngram_stream_attns += (layer_outputs[2],) if self.config.add_cross_attention: all_cross_attns += (layer_outputs[3],) if output_hidden_states: all_main_stream_hidden_states += (hidden_states[:, :sequence_length],) if self.config.ngram > 0: all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],) last_hidden_state = hidden_states[:, :sequence_length] last_hidden_state_ngram = hidden_states[:, sequence_length:] if self.config.ngram > 0 else None if not return_dict: return tuple((v for v in [last_hidden_state, last_hidden_state_ngram, present_key_values, all_main_stream_hidden_states, all_ngram_stream_hidden_states, all_main_stream_attns, all_ngram_stream_attns, all_cross_attns] if v is not None)) return XLMProphetNetDecoderModelOutput(last_hidden_state=last_hidden_state, last_hidden_state_ngram=last_hidden_state_ngram, past_key_values=present_key_values, hidden_states=all_main_stream_hidden_states, hidden_states_ngram=all_ngram_stream_hidden_states, attentions=all_main_stream_attns, ngram_attentions=all_ngram_stream_attns, cross_attentions=all_cross_attns) def compute_buffered_relative_buckets(self, position_ids): (batch_size, sequence_length) = position_ids.shape position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1) (main_relative_buckets, predict_relative_buckets) = compute_all_stream_relative_buckets(self.num_buckets, self.relative_max_distance, position_ids) main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1) predict_relative_buckets = torch.cat([predict_relative_buckets[:, :sequence_length, :sequence_length], predict_relative_buckets[:, :sequence_length, self.max_target_positions:self.max_target_positions + sequence_length]], 2).repeat(batch_size, 1, 1) return (main_relative_buckets, predict_relative_buckets) def 
prepare_attention_mask(self, hidden_states, attention_mask): (batch_size, seq_length) = hidden_states.shape[:2] causal_mask = torch.full((seq_length, seq_length), torch.finfo(hidden_states.dtype).min, dtype=hidden_states.dtype, device=hidden_states.device) causal_mask = torch.triu(causal_mask, 1) extended_causal_mask = causal_mask[:seq_length, :seq_length][None, None, :, :].expand((batch_size, self.config.num_decoder_attention_heads) + causal_mask.shape) if attention_mask is not None: extended_attention_mask = (1.0 - attention_mask[:, None, None, :]) * torch.finfo(self.dtype).min extended_attention_mask = extended_causal_mask + extended_attention_mask else: extended_attention_mask = extended_causal_mask return extended_attention_mask.to(hidden_states.dtype) def prepare_predict_attention_mask(self, hidden_states, attention_mask): (batch_size, seq_length) = hidden_states.shape[:2] predict_causal_mask = ngram_attention_bias(self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype) predict_causal_mask = torch.cat([predict_causal_mask[:, :seq_length, :seq_length], predict_causal_mask[:, :seq_length, self.max_target_positions:self.max_target_positions + seq_length]], dim=-1) extended_predict_causal_mask = predict_causal_mask[None, None, :, :, :].expand((batch_size, self.config.num_decoder_attention_heads) + predict_causal_mask.shape) if attention_mask is not None: extended_attention_mask = (1.0 - attention_mask[:, None, None, None, :]) * torch.finfo(self.dtype).min extended_attention_mask = extended_attention_mask.expand((batch_size, self.config.num_decoder_attention_heads, self.ngram, seq_length, seq_length)) extended_attention_mask = torch.cat([extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1) extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask else: extended_predict_attention_mask = extended_predict_causal_mask return extended_predict_attention_mask.to(hidden_states.dtype) @add_start_docstrings('The bare XLMProphetNet Model outputting raw hidden-states without any specific head on top.', XLM_PROPHETNET_START_DOCSTRING) class XLMProphetNetModel(XLMProphetNetPreTrainedModel): _tied_weights_keys = ['encoder.word_embeddings.weight', 'decoder.word_embeddings.weight'] def __init__(self, config: XLMProphetNetConfig): super().__init__(config) self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) encoder_config = copy.deepcopy(config) encoder_config.is_encoder_decoder = False encoder_config.use_cache = False self.encoder = XLMProphetNetEncoder(encoder_config, self.word_embeddings) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False self.decoder = XLMProphetNetDecoder(decoder_config, self.word_embeddings) self.post_init() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, value): self.word_embeddings = value self.encoder.word_embeddings = self.word_embeddings self.decoder.word_embeddings = self.word_embeddings def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.word_embeddings, self.word_embeddings) self._tie_or_clone_weights(self.decoder.word_embeddings, self.word_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(XLM_PROPHETNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=XLMProphetNetSeq2SeqModelOutput, 
config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Tuple]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, XLMProphetNetSeq2SeqModelOutput]: use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, return_dict=return_dict) if not return_dict: return decoder_outputs + encoder_outputs return XLMProphetNetSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram, decoder_attentions=decoder_outputs.attentions, decoder_ngram_attentions=decoder_outputs.ngram_attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) @add_start_docstrings('The XLMProphetNet Model with a language modeling head. 
Can be used for sequence generation tasks.', XLM_PROPHETNET_START_DOCSTRING) class XLMProphetNetForConditionalGeneration(XLMProphetNetPreTrainedModel): _tied_weights_keys = ['encoder.word_embeddings.weight', 'decoder.word_embeddings.weight', 'lm_head.weight'] def __init__(self, config: XLMProphetNetConfig): super().__init__(config) self.prophetnet = XLMProphetNetModel(config) self.padding_idx = config.pad_token_id self.disable_ngram_loss = config.disable_ngram_loss self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.prophetnet.word_embeddings, self.lm_head) def get_input_embeddings(self): return self.prophetnet.word_embeddings @add_start_docstrings_to_model_forward(XLM_PROPHETNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=XLMProphetNetSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, XLMProphetNetSeq2SeqLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and decoder_input_ids is None and (decoder_inputs_embeds is None): decoder_input_ids = self._shift_right(labels) outputs = self.prophetnet(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) (batch_size, sequence_length) = decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2] predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1) predict_logits = self.lm_head(predicting_streams) logits = predict_logits[:, 0] logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None if not logits.is_contiguous(): logits = logits.contiguous() loss = None if labels is not None: loss = self._compute_loss(predict_logits, labels) if not return_dict: all_logits = tuple((v for v in [logits, logits_ngram] if v is not None)) return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:] else: return XLMProphetNetSeq2SeqLMOutput(loss=loss, logits=logits, logits_ngram=logits_ngram, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states, 
decoder_attentions=outputs.decoder_attentions, decoder_ngram_attentions=outputs.decoder_ngram_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def _compute_loss(self, logits, labels, ignore_index=-100): expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index) for i in range(self.config.ngram): if i > 0 and self.disable_ngram_loss: break expend_targets[i, :, :] = labels logits = logits.transpose(0, 1).contiguous() lprobs = nn.functional.log_softmax(logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32) loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction='mean') if self.config.eps > 0.0: smooth_loss = -lprobs.sum(dim=-1, keepdim=True) non_masked_tokens = expend_targets.ne(ignore_index).view(-1) smooth_loss = smooth_loss[non_masked_tokens] smooth_loss = smooth_loss.mean() eps_i = self.config.eps / lprobs.size(-1) loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss return loss def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs): assert encoder_outputs is not None, '`encoder_outputs` have to be passed for generation.' if past_key_values: decoder_input_ids = decoder_input_ids[:, -1:] return {'input_ids': None, 'encoder_outputs': encoder_outputs, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])) + layer_past[2:],) return reordered_past def get_encoder(self): return self.prophetnet.encoder def get_decoder(self): return self.prophetnet.decoder @add_start_docstrings('The standalone decoder part of the XLMProphetNetModel with a lm head on top. 
The model can be used for causal language modeling.', XLM_PROPHETNET_START_DOCSTRING) class XLMProphetNetForCausalLM(XLMProphetNetPreTrainedModel): _tied_weights_keys = ['prophetnet.word_embeddings.weight', 'prophetnet.decoder.word_embeddings.weight', 'lm_head.weight'] def __init__(self, config: XLMProphetNetConfig): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.prophetnet = XLMProphetNetDecoderWrapper(config) self.padding_idx = config.pad_token_id self.disable_ngram_loss = config.disable_ngram_loss self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.prophetnet.decoder.word_embeddings def set_input_embeddings(self, value): self.prophetnet.decoder.word_embeddings = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.prophetnet.decoder.word_embeddings, self.lm_head) def set_decoder(self, decoder): self.prophetnet.decoder = decoder def get_decoder(self): return self.prophetnet.decoder @add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=XLMProphetNetDecoderLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, XLMProphetNetDecoderLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.prophetnet.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) (batch_size, sequence_length) = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2] predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1) predict_logits = self.lm_head(predicting_streams) logits = predict_logits[:, 0] logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None loss = None if labels is not None: loss = self._compute_loss(predict_logits, labels) if not return_dict: all_logits = tuple((v for v in [logits, logits_ngram] if v is not None)) return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:] else: return XLMProphetNetDecoderLMOutput(loss=loss, logits=logits, logits_ngram=logits_ngram, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, hidden_states_ngram=outputs.hidden_states_ngram, attentions=outputs.attentions, ngram_attentions=outputs.ngram_attentions, cross_attentions=outputs.cross_attentions) def _compute_loss(self, logits, 
labels, ignore_index=-100): expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index) for i in range(self.config.ngram): if i > 0 and self.disable_ngram_loss: break expend_targets[i, :, :] = labels logits = logits.transpose(0, 1).contiguous() lprobs = nn.functional.log_softmax(logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32) loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction='mean') if self.config.eps > 0.0: smooth_loss = -lprobs.sum(dim=-1, keepdim=True) non_masked_tokens = expend_targets.ne(ignore_index).view(-1) smooth_loss = smooth_loss[non_masked_tokens] smooth_loss = smooth_loss.mean() eps_i = self.config.eps / lprobs.size(-1) loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss return loss def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, use_cache=None, **kwargs): if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: input_ids = input_ids[:, -1:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'head_mask': head_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past class XLMProphetNetDecoderWrapper(XLMProphetNetPreTrainedModel): def __init__(self, config: XLMProphetNetConfig): super().__init__(config) self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.decoder = XLMProphetNetDecoder(config, word_embeddings=self.word_embeddings) self.post_init() def _tie_weights(self): self._tie_or_clone_weights(self.word_embeddings, self.decoder.get_input_embeddings()) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) # File: transformers-main/src/transformers/models/deprecated/xlm_prophetnet/tokenization_xlm_prophetnet.py import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ....tokenization_utils import PreTrainedTokenizer from ....utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = '▁' VOCAB_FILES_NAMES = {'vocab_file': 'prophetnet.tokenizer'} def load_vocab(vocab_file): vocab = collections.OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as reader: tokens = reader.readlines() for (index, token) in enumerate(tokens): token = token.rstrip('\n') vocab[token] = index return vocab class XLMProphetNetTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, bos_token='[SEP]', eos_token='[SEP]', sep_token='[SEP]', unk_token='[UNK]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs try: import sentencepiece as spm except ImportError: logger.warning('You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece (pip install sentencepiece)') raise self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) self.vocab_file = vocab_file self.fairseq_tokens_to_ids = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4} for i in
range(10): tok = f'[unused{i}]' self.fairseq_tokens_to_ids[tok] = 5 + i self.fairseq_offset = 12 self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()} super().__init__(bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs) @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None return state def __setstate__(self, d): self.__dict__ = d try: import sentencepiece as spm except ImportError: logger.warning('You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece (pip install sentencepiece)') raise if not hasattr(self, 'sp_model_kwargs'): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [0] * len(token_ids_0) + [1] return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] if token_ids_1 is None: return len(token_ids_0 + sep) * [0] return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.sp_model) + self.fairseq_offset def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] spm_id = self.sp_model.PieceToId(token) return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return token_ids_0 + [self.sep_token_id] sep = [self.sep_token_id] return token_ids_0 + sep + token_ids_1 + sep # File:
transformers-main/src/transformers/models/depth_anything/__init__.py from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _import_structure = {'configuration_depth_anything': ['DepthAnythingConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_depth_anything'] = ['DepthAnythingForDepthEstimation', 'DepthAnythingPreTrainedModel'] if TYPE_CHECKING: from .configuration_depth_anything import DepthAnythingConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_depth_anything import DepthAnythingForDepthEstimation, DepthAnythingPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/depth_anything/configuration_depth_anything.py """""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto.configuration_auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class DepthAnythingConfig(PretrainedConfig): model_type = 'depth_anything' def __init__(self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, patch_size=14, initializer_range=0.02, reassemble_hidden_size=384, reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[48, 96, 192, 384], fusion_hidden_size=64, head_in_index=-1, head_hidden_size=32, depth_estimation_type='relative', max_depth=None, **kwargs): super().__init__(**kwargs) if backbone_config is None and backbone is None: logger.info('`backbone_config` is `None`. 
Initializing the config with the default `Dinov2` backbone.') backbone_config = CONFIG_MAPPING['dinov2'](image_size=518, hidden_size=384, num_attention_heads=6, out_indices=[9, 10, 11, 12], apply_layernorm=True, reshape_hidden_states=False) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get('model_type') config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs) self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.reassemble_hidden_size = reassemble_hidden_size self.patch_size = patch_size self.initializer_range = initializer_range self.reassemble_factors = reassemble_factors self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size self.head_in_index = head_in_index self.head_hidden_size = head_hidden_size if depth_estimation_type not in ['relative', 'metric']: raise ValueError("depth_estimation_type must be one of ['relative', 'metric']") self.depth_estimation_type = depth_estimation_type self.max_depth = max_depth if max_depth else 1 def to_dict(self): output = copy.deepcopy(self.__dict__) if output['backbone_config'] is not None: output['backbone_config'] = self.backbone_config.to_dict() output['model_type'] = self.__class__.model_type return output # File: transformers-main/src/transformers/models/depth_anything/convert_depth_anything_to_hf.py """""" import argparse from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DepthAnythingConfig, DepthAnythingForDepthEstimation, Dinov2Config, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dpt_config(model_name): if 'small' in model_name: out_indices = [3, 6, 9, 12] if 'v2' in model_name else [9, 10, 11, 12] backbone_config = Dinov2Config.from_pretrained('facebook/dinov2-small', out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False) fusion_hidden_size = 64 neck_hidden_sizes = [48, 96, 192, 384] elif 'base' in model_name: out_indices = [3, 6, 9, 12] if 'v2' in model_name else [9, 10, 11, 12] backbone_config = Dinov2Config.from_pretrained('facebook/dinov2-base', out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False) fusion_hidden_size = 128 neck_hidden_sizes = [96, 192, 384, 768] elif 'large' in model_name: out_indices = [5, 12, 18, 24] if 'v2' in model_name else [21, 22, 23, 24] backbone_config = Dinov2Config.from_pretrained('facebook/dinov2-large', out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False) fusion_hidden_size = 256 neck_hidden_sizes = [256, 512, 1024, 1024] else: raise NotImplementedError(f'Model not supported: {model_name}') if 'metric' in model_name: depth_estimation_type = 'metric' max_depth = 20 if 'indoor' in model_name else 80 else: depth_estimation_type = 'relative' max_depth = None config = DepthAnythingConfig(reassemble_hidden_size=backbone_config.hidden_size, patch_size=backbone_config.patch_size, backbone_config=backbone_config, fusion_hidden_size=fusion_hidden_size, neck_hidden_sizes=neck_hidden_sizes, 
depth_estimation_type=depth_estimation_type, max_depth=max_depth) return config def create_rename_keys(config): rename_keys = [] rename_keys.append(('pretrained.cls_token', 'backbone.embeddings.cls_token')) rename_keys.append(('pretrained.mask_token', 'backbone.embeddings.mask_token')) rename_keys.append(('pretrained.pos_embed', 'backbone.embeddings.position_embeddings')) rename_keys.append(('pretrained.patch_embed.proj.weight', 'backbone.embeddings.patch_embeddings.projection.weight')) rename_keys.append(('pretrained.patch_embed.proj.bias', 'backbone.embeddings.patch_embeddings.projection.bias')) for i in range(config.backbone_config.num_hidden_layers): rename_keys.append((f'pretrained.blocks.{i}.ls1.gamma', f'backbone.encoder.layer.{i}.layer_scale1.lambda1')) rename_keys.append((f'pretrained.blocks.{i}.ls2.gamma', f'backbone.encoder.layer.{i}.layer_scale2.lambda1')) rename_keys.append((f'pretrained.blocks.{i}.norm1.weight', f'backbone.encoder.layer.{i}.norm1.weight')) rename_keys.append((f'pretrained.blocks.{i}.norm1.bias', f'backbone.encoder.layer.{i}.norm1.bias')) rename_keys.append((f'pretrained.blocks.{i}.norm2.weight', f'backbone.encoder.layer.{i}.norm2.weight')) rename_keys.append((f'pretrained.blocks.{i}.norm2.bias', f'backbone.encoder.layer.{i}.norm2.bias')) rename_keys.append((f'pretrained.blocks.{i}.mlp.fc1.weight', f'backbone.encoder.layer.{i}.mlp.fc1.weight')) rename_keys.append((f'pretrained.blocks.{i}.mlp.fc1.bias', f'backbone.encoder.layer.{i}.mlp.fc1.bias')) rename_keys.append((f'pretrained.blocks.{i}.mlp.fc2.weight', f'backbone.encoder.layer.{i}.mlp.fc2.weight')) rename_keys.append((f'pretrained.blocks.{i}.mlp.fc2.bias', f'backbone.encoder.layer.{i}.mlp.fc2.bias')) rename_keys.append((f'pretrained.blocks.{i}.attn.proj.weight', f'backbone.encoder.layer.{i}.attention.output.dense.weight')) rename_keys.append((f'pretrained.blocks.{i}.attn.proj.bias', f'backbone.encoder.layer.{i}.attention.output.dense.bias')) rename_keys.append(('pretrained.norm.weight', 'backbone.layernorm.weight')) rename_keys.append(('pretrained.norm.bias', 'backbone.layernorm.bias')) for i in range(4): rename_keys.append((f'depth_head.projects.{i}.weight', f'neck.reassemble_stage.layers.{i}.projection.weight')) rename_keys.append((f'depth_head.projects.{i}.bias', f'neck.reassemble_stage.layers.{i}.projection.bias')) if i != 2: rename_keys.append((f'depth_head.resize_layers.{i}.weight', f'neck.reassemble_stage.layers.{i}.resize.weight')) rename_keys.append((f'depth_head.resize_layers.{i}.bias', f'neck.reassemble_stage.layers.{i}.resize.bias')) mapping = {1: 3, 2: 2, 3: 1, 4: 0} for i in range(1, 5): j = mapping[i] rename_keys.append((f'depth_head.scratch.refinenet{i}.out_conv.weight', f'neck.fusion_stage.layers.{j}.projection.weight')) rename_keys.append((f'depth_head.scratch.refinenet{i}.out_conv.bias', f'neck.fusion_stage.layers.{j}.projection.bias')) rename_keys.append((f'depth_head.scratch.refinenet{i}.resConfUnit1.conv1.weight', f'neck.fusion_stage.layers.{j}.residual_layer1.convolution1.weight')) rename_keys.append((f'depth_head.scratch.refinenet{i}.resConfUnit1.conv1.bias', f'neck.fusion_stage.layers.{j}.residual_layer1.convolution1.bias')) rename_keys.append((f'depth_head.scratch.refinenet{i}.resConfUnit1.conv2.weight', f'neck.fusion_stage.layers.{j}.residual_layer1.convolution2.weight')) rename_keys.append((f'depth_head.scratch.refinenet{i}.resConfUnit1.conv2.bias', f'neck.fusion_stage.layers.{j}.residual_layer1.convolution2.bias')) 
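# Note on the index flip applied via `mapping = {1: 3, 2: 2, 3: 1, 4: 0}` above: the HF
# `neck.fusion_stage.layers` are applied to the backbone features in reverse order (see
# DepthAnythingFeatureFusionStage further below), so the original `refinenet4` weights land in
# fusion layer 0 and `refinenet1` in fusion layer 3, while each `resConfUnit1`/`resConfUnit2`
# pair maps onto `residual_layer1`/`residual_layer2` of the corresponding fusion layer.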
rename_keys.append((f'depth_head.scratch.refinenet{i}.resConfUnit2.conv1.weight', f'neck.fusion_stage.layers.{j}.residual_layer2.convolution1.weight')) rename_keys.append((f'depth_head.scratch.refinenet{i}.resConfUnit2.conv1.bias', f'neck.fusion_stage.layers.{j}.residual_layer2.convolution1.bias')) rename_keys.append((f'depth_head.scratch.refinenet{i}.resConfUnit2.conv2.weight', f'neck.fusion_stage.layers.{j}.residual_layer2.convolution2.weight')) rename_keys.append((f'depth_head.scratch.refinenet{i}.resConfUnit2.conv2.bias', f'neck.fusion_stage.layers.{j}.residual_layer2.convolution2.bias')) for i in range(4): rename_keys.append((f'depth_head.scratch.layer{i + 1}_rn.weight', f'neck.convs.{i}.weight')) rename_keys.append(('depth_head.scratch.output_conv1.weight', 'head.conv1.weight')) rename_keys.append(('depth_head.scratch.output_conv1.bias', 'head.conv1.bias')) rename_keys.append(('depth_head.scratch.output_conv2.0.weight', 'head.conv2.weight')) rename_keys.append(('depth_head.scratch.output_conv2.0.bias', 'head.conv2.bias')) rename_keys.append(('depth_head.scratch.output_conv2.2.weight', 'head.conv3.weight')) rename_keys.append(('depth_head.scratch.output_conv2.2.bias', 'head.conv3.bias')) return rename_keys def read_in_q_k_v(state_dict, config): hidden_size = config.backbone_config.hidden_size for i in range(config.backbone_config.num_hidden_layers): in_proj_weight = state_dict.pop(f'pretrained.blocks.{i}.attn.qkv.weight') in_proj_bias = state_dict.pop(f'pretrained.blocks.{i}.attn.qkv.bias') state_dict[f'backbone.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[:hidden_size, :] state_dict[f'backbone.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[:hidden_size] state_dict[f'backbone.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[hidden_size:hidden_size * 2, :] state_dict[f'backbone.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[hidden_size:hidden_size * 2] state_dict[f'backbone.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-hidden_size:, :] state_dict[f'backbone.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-hidden_size:] def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im name_to_checkpoint = {'depth-anything-small': 'pytorch_model.bin', 'depth-anything-base': 'pytorch_model.bin', 'depth-anything-large': 'pytorch_model.bin', 'depth-anything-v2-small': 'depth_anything_v2_vits.pth', 'depth-anything-v2-base': 'depth_anything_v2_vitb.pth', 'depth-anything-v2-large': 'depth_anything_v2_vitl.pth', 'depth-anything-v2-metric-indoor-small': 'depth_anything_v2_metric_hypersim_vits.pth', 'depth-anything-v2-metric-indoor-base': 'depth_anything_v2_metric_hypersim_vitb.pth', 'depth-anything-v2-metric-indoor-large': 'depth_anything_v2_metric_hypersim_vitl.pth', 'depth-anything-v2-metric-outdoor-small': 'depth_anything_v2_metric_vkitti_vits.pth', 'depth-anything-v2-metric-outdoor-base': 'depth_anything_v2_metric_vkitti_vitb.pth', 'depth-anything-v2-metric-outdoor-large': 'depth_anything_v2_metric_vkitti_vitl.pth'} @torch.no_grad() def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, verify_logits): config = get_dpt_config(model_name) model_name_to_repo = {'depth-anything-small': 'LiheYoung/depth_anything_vits14', 'depth-anything-base': 'LiheYoung/depth_anything_vitb14', 'depth-anything-large': 
'LiheYoung/depth_anything_vitl14', 'depth-anything-v2-small': 'depth-anything/Depth-Anything-V2-Small', 'depth-anything-v2-base': 'depth-anything/Depth-Anything-V2-Base', 'depth-anything-v2-large': 'depth-anything/Depth-Anything-V2-Large', 'depth-anything-v2-metric-indoor-small': 'depth-anything/Depth-Anything-V2-Metric-Hypersim-Small', 'depth-anything-v2-metric-indoor-base': 'depth-anything/Depth-Anything-V2-Metric-Hypersim-Base', 'depth-anything-v2-metric-indoor-large': 'depth-anything/Depth-Anything-V2-Metric-Hypersim-Large', 'depth-anything-v2-metric-outdoor-small': 'depth-anything/Depth-Anything-V2-Metric-VKITTI-Small', 'depth-anything-v2-metric-outdoor-base': 'depth-anything/Depth-Anything-V2-Metric-VKITTI-Base', 'depth-anything-v2-metric-outdoor-large': 'depth-anything/Depth-Anything-V2-Metric-VKITTI-Large'} repo_id = model_name_to_repo[model_name] filename = name_to_checkpoint[model_name] filepath = hf_hub_download(repo_id=repo_id, filename=f'{filename}') state_dict = torch.load(filepath, map_location='cpu') rename_keys = create_rename_keys(config) for (src, dest) in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config) model = DepthAnythingForDepthEstimation(config) model.load_state_dict(state_dict) model.eval() processor = DPTImageProcessor(do_resize=True, size={'height': 518, 'width': 518}, ensure_multiple_of=14, keep_aspect_ratio=True, do_rescale=True, do_normalize=True, image_mean=[0.485, 0.456, 0.406], image_std=[0.229, 0.224, 0.225]) url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) pixel_values = processor(image, return_tensors='pt').pixel_values with torch.no_grad(): outputs = model(pixel_values) predicted_depth = outputs.predicted_depth print('Shape of predicted depth:', predicted_depth.shape) print('First values:', predicted_depth[0, :3, :3]) if verify_logits: expected_shape = torch.Size([1, 518, 686]) if model_name == 'depth-anything-small': expected_slice = torch.tensor([[8.8204, 8.6468, 8.6195], [8.3313, 8.6027, 8.7526], [8.6526, 8.6866, 8.7453]]) elif model_name == 'depth-anything-base': expected_slice = torch.tensor([[26.3997, 26.3004, 26.3928], [26.226, 26.2092, 26.3427], [26.0719, 26.0483, 26.1254]]) elif model_name == 'depth-anything-large': expected_slice = torch.tensor([[87.9968, 87.7493, 88.2704], [87.1927, 87.6611, 87.364], [86.7789, 86.9469, 86.7991]]) elif model_name == 'depth-anything-v2-small': expected_slice = torch.tensor([[2.6751, 2.6211, 2.6571], [2.582, 2.6138, 2.6271], [2.616, 2.6141, 2.6306]]) elif model_name == 'depth-anything-v2-base': expected_slice = torch.tensor([[4.3576, 4.3723, 4.3908], [4.3231, 4.3146, 4.3611], [4.3016, 4.317, 4.3121]]) elif model_name == 'depth-anything-v2-large': expected_slice = torch.tensor([[162.2751, 161.8504, 162.8788], [160.3138, 160.805, 161.9835], [159.3812, 159.9884, 160.0768]]) elif model_name == 'depth-anything-v2-metric-indoor-small': expected_slice = torch.tensor([[1.3349, 1.2946, 1.2801], [1.2793, 1.2337, 1.2899], [1.2629, 1.2218, 1.2476]]) elif model_name == 'depth-anything-v2-metric-indoor-base': expected_slice = torch.tensor([[1.4601, 1.3824, 1.4904], [1.5031, 1.4349, 1.4274], [1.457, 1.4578, 1.42]]) elif model_name == 'depth-anything-v2-metric-indoor-large': expected_slice = torch.tensor([[1.504, 1.5019, 1.5218], [1.5087, 1.5195, 1.5149], [1.5437, 1.5128, 1.5252]]) elif model_name == 'depth-anything-v2-metric-outdoor-small': expected_slice = torch.tensor([[9.5804, 8.0339, 7.7386], [7.989, 7.2464, 7.7149], 
[7.7021, 7.233, 7.3304]]) elif model_name == 'depth-anything-v2-metric-outdoor-base': expected_slice = torch.tensor([[10.2916, 9.0933, 8.8622], [9.1964, 9.3393, 9.0644], [8.9618, 9.4201, 9.2262]]) elif model_name == 'depth-anything-v2-metric-outdoor-large': expected_slice = torch.tensor([[14.0137, 13.3627, 13.108], [13.2522, 13.3943, 13.3705], [13.0581, 13.4505, 13.3925]]) else: raise ValueError('Not supported') assert predicted_depth.shape == torch.Size(expected_shape) assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=0.0001) print('Looks ok!') if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model and processor to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print('Pushing model and processor to hub...') model.push_to_hub(repo_id=f'{model_name.title()}-hf') processor.push_to_hub(repo_id=f'{model_name.title()}-hf') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='depth-anything-small', type=str, choices=name_to_checkpoint.keys(), help="Name of the model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub after conversion.') parser.add_argument('--verify_logits', action='store_false', required=False, help='Whether to verify the logits after conversion.') args = parser.parse_args() convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.verify_logits) # File: transformers-main/src/transformers/models/depth_anything/modeling_depth_anything.py """""" from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...modeling_outputs import DepthEstimatorOutput from ...modeling_utils import PreTrainedModel from ...utils import logging from ...utils.backbone_utils import load_backbone from .configuration_depth_anything import DepthAnythingConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'DepthAnythingConfig' DEPTH_ANYTHING_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`DepthAnythingConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' DEPTH_ANYTHING_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`DPTImageProcessor.__call__`]\n for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' class DepthAnythingReassembleLayer(nn.Module): def __init__(self, config, channels, factor): super().__init__() self.projection = nn.Conv2d(in_channels=config.reassemble_hidden_size, out_channels=channels, kernel_size=1) if factor > 1: self.resize = nn.ConvTranspose2d(channels, channels, kernel_size=factor, stride=factor, padding=0) elif factor == 1: self.resize = nn.Identity() elif factor < 1: self.resize = nn.Conv2d(channels, channels, kernel_size=3, stride=int(1 / factor), padding=1) def forward(self, hidden_state): hidden_state = self.projection(hidden_state) hidden_state = self.resize(hidden_state) return hidden_state class DepthAnythingReassembleStage(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layers = nn.ModuleList() for (channels, factor) in zip(config.neck_hidden_sizes, config.reassemble_factors): self.layers.append(DepthAnythingReassembleLayer(config, channels=channels, factor=factor)) def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]: out = [] for (i, hidden_state) in enumerate(hidden_states): hidden_state = hidden_state[:, 1:] (batch_size, _, num_channels) = hidden_state.shape hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() hidden_state = self.layers[i](hidden_state) out.append(hidden_state) return out class DepthAnythingPreActResidualLayer(nn.Module): def __init__(self, config): super().__init__() self.activation1 = nn.ReLU() self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True) self.activation2 = nn.ReLU() self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: residual = hidden_state hidden_state = self.activation1(hidden_state) hidden_state = self.convolution1(hidden_state) hidden_state = self.activation2(hidden_state) hidden_state = self.convolution2(hidden_state) return hidden_state + residual class DepthAnythingFeatureFusionLayer(nn.Module): def __init__(self, config): super().__init__() self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True) self.residual_layer1 = DepthAnythingPreActResidualLayer(config) self.residual_layer2 = DepthAnythingPreActResidualLayer(config) def forward(self, hidden_state, residual=None, size=None): if residual is not None: if hidden_state.shape != residual.shape: residual = nn.functional.interpolate(residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode='bilinear', align_corners=False) hidden_state = hidden_state + self.residual_layer1(residual) hidden_state = self.residual_layer2(hidden_state) modifier = {'scale_factor': 2} if size is None else {'size': size} hidden_state = nn.functional.interpolate(hidden_state, **modifier, mode='bilinear', align_corners=True) hidden_state = self.projection(hidden_state) return hidden_state class DepthAnythingFeatureFusionStage(nn.Module): def __init__(self, config): super().__init__() self.layers = nn.ModuleList() for _ in range(len(config.neck_hidden_sizes)): self.layers.append(DepthAnythingFeatureFusionLayer(config)) def forward(self, 
hidden_states, size=None): hidden_states = hidden_states[::-1] fused_hidden_states = [] size = hidden_states[1].shape[2:] fused_hidden_state = self.layers[0](hidden_states[0], size=size) fused_hidden_states.append(fused_hidden_state) for (idx, (hidden_state, layer)) in enumerate(zip(hidden_states[1:], self.layers[1:])): size = hidden_states[1:][idx + 1].shape[2:] if idx != len(hidden_states[1:]) - 1 else None fused_hidden_state = layer(fused_hidden_state, hidden_state, size=size) fused_hidden_states.append(fused_hidden_state) return fused_hidden_states class DepthAnythingPreTrainedModel(PreTrainedModel): config_class = DepthAnythingConfig base_model_prefix = 'depth_anything' main_input_name = 'pixel_values' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class DepthAnythingNeck(nn.Module): def __init__(self, config): super().__init__() self.config = config self.reassemble_stage = DepthAnythingReassembleStage(config) self.convs = nn.ModuleList() for channel in config.neck_hidden_sizes: self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False)) self.fusion_stage = DepthAnythingFeatureFusionStage(config) def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]: if not isinstance(hidden_states, (tuple, list)): raise TypeError('hidden_states should be a tuple or list of tensors') if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.') hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width) features = [self.convs[i](feature) for (i, feature) in enumerate(hidden_states)] output = self.fusion_stage(features) return output class DepthAnythingDepthEstimationHead(nn.Module): def __init__(self, config): super().__init__() self.head_in_index = config.head_in_index self.patch_size = config.patch_size features = config.fusion_hidden_size self.conv1 = nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(features // 2, config.head_hidden_size, kernel_size=3, stride=1, padding=1) self.activation1 = nn.ReLU() self.conv3 = nn.Conv2d(config.head_hidden_size, 1, kernel_size=1, stride=1, padding=0) if config.depth_estimation_type == 'relative': self.activation2 = nn.ReLU() elif config.depth_estimation_type == 'metric': self.activation2 = nn.Sigmoid() else: raise ValueError(f'Unknown depth estimation type: {config.depth_estimation_type}') self.max_depth = config.max_depth def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> torch.Tensor: hidden_states = hidden_states[self.head_in_index] predicted_depth = self.conv1(hidden_states) predicted_depth = nn.functional.interpolate(predicted_depth, (int(patch_height * self.patch_size), int(patch_width * self.patch_size)), mode='bilinear', align_corners=True) predicted_depth = self.conv2(predicted_depth) predicted_depth = self.activation1(predicted_depth) predicted_depth = self.conv3(predicted_depth) predicted_depth = self.activation2(predicted_depth) * self.max_depth predicted_depth = predicted_depth.squeeze(dim=1) return predicted_depth @add_start_docstrings('\n Depth 
Anything Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.\n ', DEPTH_ANYTHING_START_DOCSTRING) class DepthAnythingForDepthEstimation(DepthAnythingPreTrainedModel): _no_split_modules = ['DPTViTEmbeddings'] def __init__(self, config): super().__init__(config) self.backbone = load_backbone(config) self.neck = DepthAnythingNeck(config) self.head = DepthAnythingDepthEstimationHead(config) self.post_init() @add_start_docstrings_to_model_forward(DEPTH_ANYTHING_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]: loss = None if labels is not None: raise NotImplementedError('Training is not implemented yet') return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions outputs = self.backbone.forward_with_filtered_kwargs(pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions) hidden_states = outputs.feature_maps (_, _, height, width) = pixel_values.shape patch_size = self.config.patch_size patch_height = height // patch_size patch_width = width // patch_size hidden_states = self.neck(hidden_states, patch_height, patch_width) predicted_depth = self.head(hidden_states, patch_height, patch_width) if not return_dict: if output_hidden_states: output = (predicted_depth,) + outputs[1:] else: output = (predicted_depth,) + outputs[2:] return (loss,) + output if loss is not None else output return DepthEstimatorOutput(loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/detr/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_detr': ['DetrConfig', 'DetrOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['feature_extraction_detr'] = ['DetrFeatureExtractor'] _import_structure['image_processing_detr'] = ['DetrImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_detr'] = ['DetrForObjectDetection', 'DetrForSegmentation', 'DetrModel', 'DetrPreTrainedModel'] if TYPE_CHECKING: from .configuration_detr import DetrConfig, DetrOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_detr import DetrFeatureExtractor from .image_processing_detr import DetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_detr import DetrForObjectDetection, DetrForSegmentation, DetrModel, DetrPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], 
_import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/detr/configuration_detr.py """""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class DetrConfig(PretrainedConfig): model_type = 'detr' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads'} def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type='sine', backbone='resnet50', use_pretrained_backbone=True, backbone_kwargs=None, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs): if use_timm_backbone and backbone_kwargs is None: backbone_kwargs = {} if dilation: backbone_kwargs['output_stride'] = 16 backbone_kwargs['out_indices'] = [1, 2, 3, 4] backbone_kwargs['in_chans'] = num_channels elif not use_timm_backbone and backbone in (None, 'resnet50'): if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.') backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4']) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get('model_type') config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) backbone = None dilation = None verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs) self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_channels = num_channels self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.init_xavier_std = init_xavier_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.num_hidden_layers = encoder_layers self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.backbone_kwargs = backbone_kwargs self.dilation = dilation self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost self.mask_loss_coefficient = mask_loss_coefficient self.dice_loss_coefficient = dice_loss_coefficient 
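# class_cost, bbox_cost and giou_cost weight the terms of the Hungarian matching cost used to
# assign predictions to ground-truth boxes, whereas the *_loss_coefficient values weight the
# corresponding terms of the training loss once that assignment is fixed; eos_coefficient is the
# relative classification weight given to the 'no object' class.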
self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.eos_coefficient = eos_coefficient super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model @classmethod def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs): return cls(backbone_config=backbone_config, **kwargs) class DetrOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse('1.11') @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'})]) @property def atol_for_validation(self) -> float: return 1e-05 @property def default_onnx_opset(self) -> int: return 12 # File: transformers-main/src/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py """""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) rename_keys = [] for i in range(6): rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) 
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) rename_keys.extend([('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias')]) def rename_key(state_dict, old, new): val = state_dict.pop(old) state_dict[new] = val def rename_backbone_keys(state_dict): new_state_dict = OrderedDict() for (key, value) in state_dict.items(): if 'backbone.0.body' in key: new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model') new_state_dict[new_key] = value else: new_state_dict[key] = value return new_state_dict def read_in_q_k_v(state_dict, is_panoptic=False): prefix = '' if is_panoptic: prefix = 'detr.' 
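# Illustrative sanity check (a sketch, assuming DETR's default d_model of 256): each fused
# `in_proj_weight` popped below has shape (3 * 256, 256) and is sliced into query ([:256]),
# key ([256:512]) and value ([-256:]) projections. `_d_model` and `_first_in_proj` are local
# names introduced only for this sketch.
_d_model = 256
_first_in_proj = state_dict[f'{prefix}transformer.encoder.layers.0.self_attn.in_proj_weight']
assert _first_in_proj.shape == (3 * _d_model, _d_model)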
for i in range(6): in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight') in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias') state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :] state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256] state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :] state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512] state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :] state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:] for i in range(6): in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight') in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias') state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :] state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256] state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :] state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512] state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :] state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:] in_proj_weight_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight') in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias') state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :] state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256] state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :] state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512] state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :] state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:] def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_detr_checkpoint(model_name, pytorch_dump_folder_path): config = DetrConfig() if 'resnet101' in model_name: config.backbone = 'resnet101' if 'dc5' in model_name: config.dilation = True is_panoptic = 'panoptic' in model_name if is_panoptic: config.num_labels = 250 else: config.num_labels = 91 repo_id = 'huggingface/label-files' filename = 'coco-detection-id2label.json' id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} config.id2label = id2label config.label2id = {v: k for (k, v) in id2label.items()} format = 'coco_panoptic' if is_panoptic else 'coco_detection' image_processor = DetrImageProcessor(format=format) img = prepare_img() encoding = image_processor(images=img, return_tensors='pt') pixel_values = encoding['pixel_values'] logger.info(f'Converting model {model_name}...') detr = torch.hub.load('facebookresearch/detr', model_name, pretrained=True).eval() state_dict = detr.state_dict() for (src, dest) in rename_keys: if is_panoptic: src = 'detr.' 
+ src rename_key(state_dict, src, dest) state_dict = rename_backbone_keys(state_dict) read_in_q_k_v(state_dict, is_panoptic=is_panoptic) prefix = 'detr.model.' if is_panoptic else 'model.' for key in state_dict.copy().keys(): if is_panoptic: if key.startswith('detr') and (not key.startswith('class_labels_classifier')) and (not key.startswith('bbox_predictor')): val = state_dict.pop(key) state_dict['detr.model' + key[4:]] = val elif 'class_labels_classifier' in key or 'bbox_predictor' in key: val = state_dict.pop(key) state_dict['detr.' + key] = val elif key.startswith('bbox_attention') or key.startswith('mask_head'): continue else: val = state_dict.pop(key) state_dict[prefix + key] = val elif not key.startswith('class_labels_classifier') and (not key.startswith('bbox_predictor')): val = state_dict.pop(key) state_dict[prefix + key] = val model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config) model.load_state_dict(state_dict) model.eval() original_outputs = detr(pixel_values) outputs = model(pixel_values) assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=0.0001) assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=0.0001) if is_panoptic: assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=0.0001) logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...') Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='detr_resnet50', type=str, help="Name of the DETR model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.') args = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) # File: transformers-main/src/transformers/models/detr/convert_detr_to_pytorch.py """""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_detr_config(model_name): if 'resnet-50' in model_name: backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50') elif 'resnet-101' in model_name: backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101') else: raise ValueError('Model name should include either resnet50 or resnet101') config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config) is_panoptic = 'panoptic' in model_name if is_panoptic: config.num_labels = 250 else: config.num_labels = 91 repo_id = 'huggingface/label-files' filename = 'coco-detection-id2label.json' id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} config.id2label = id2label config.label2id = {v: k for (k, v) in id2label.items()} return (config, is_panoptic) def create_rename_keys(config): rename_keys = [] rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight')) rename_keys.append(('backbone.0.body.bn1.weight', 
'backbone.conv_encoder.model.embedder.embedder.normalization.weight')) rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias')) rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean')) rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var')) for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): if layer_idx == 0: rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight', f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight')) rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight', f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight')) rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias', f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias')) rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean', f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean')) rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var', f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var')) for i in range(3): rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i + 1}.weight', f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight')) rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.weight', f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight')) rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.bias', f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias')) rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.running_mean', f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean')) rename_keys.append((f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i + 1}.running_var', f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var')) for i in range(config.encoder_layers): rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', 
f'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', f'decoder.layers.{i}.encoder_attn.out_proj.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', f'decoder.layers.{i}.encoder_attn.out_proj.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias')) rename_keys.extend([('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias')]) return rename_keys def rename_key(state_dict, old, new): val = state_dict.pop(old) state_dict[new] = val def read_in_q_k_v(state_dict, is_panoptic=False): prefix = '' if is_panoptic: prefix = 'detr.' 
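# Note (descriptive comment): the fused `in_proj_weight`/`in_proj_bias` tensors popped below stack the
# query, key and value projections along dim 0 (hidden size 256 for these DETR checkpoints), so the
# 256-row slices split them back into separate q_proj / k_proj / v_proj parameters.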
for i in range(6): in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight') in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias') state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :] state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256] state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :] state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512] state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :] state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:] for i in range(6): in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight') in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias') state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :] state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256] state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :] state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512] state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :] state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:] in_proj_weight_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight') in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias') state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :] state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256] state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :] state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512] state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :] state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:] def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): (config, is_panoptic) = get_detr_config(model_name) model_name_to_original_name = {'detr-resnet-50': 'detr_resnet50', 'detr-resnet-101': 'detr_resnet101'} logger.info(f'Converting model {model_name}...') detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True).eval() state_dict = detr.state_dict() for (src, dest) in create_rename_keys(config): if is_panoptic: src = 'detr.' + src rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, is_panoptic=is_panoptic) prefix = 'detr.model.' if is_panoptic else 'model.' for key in state_dict.copy().keys(): if is_panoptic: if key.startswith('detr') and (not key.startswith('class_labels_classifier')) and (not key.startswith('bbox_predictor')): val = state_dict.pop(key) state_dict['detr.model' + key[4:]] = val elif 'class_labels_classifier' in key or 'bbox_predictor' in key: val = state_dict.pop(key) state_dict['detr.' 
+ key] = val elif key.startswith('bbox_attention') or key.startswith('mask_head'): continue else: val = state_dict.pop(key) state_dict[prefix + key] = val elif not key.startswith('class_labels_classifier') and (not key.startswith('bbox_predictor')): val = state_dict.pop(key) state_dict[prefix + key] = val model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config) model.load_state_dict(state_dict) model.eval() format = 'coco_panoptic' if is_panoptic else 'coco_detection' processor = DetrImageProcessor(format=format) encoding = processor(images=prepare_img(), return_tensors='pt') pixel_values = encoding['pixel_values'] original_outputs = detr(pixel_values) outputs = model(pixel_values) assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=0.001) assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=0.001) if is_panoptic: assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=0.0001) print('Looks ok!') if pytorch_dump_folder_path is not None: logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...') Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: logger.info('Uploading PyTorch model and image processor to the hub...') model.push_to_hub(f'nielsr/{model_name}') processor.push_to_hub(f'nielsr/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help="Name of the DETR model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') args = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/detr/feature_extraction_detr.py """""" import warnings from ...image_transforms import rgb_to_id as _rgb_to_id from ...utils import logging from .image_processing_detr import DetrImageProcessor logger = logging.get_logger(__name__) def rgb_to_id(x): warnings.warn('rgb_to_id has moved and will not be importable from this module from v5. Please import from transformers.image_transforms instead.', FutureWarning) return _rgb_to_id(x) class DetrFeatureExtractor(DetrImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class DetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers. 
Please use DetrImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs) # File: transformers-main/src/transformers/models/detr/image_processing_detr.py """""" import io import pathlib from collections import defaultdict from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, pad, rescale, resize, rgb_to_id, to_channel_dimension_format from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, AnnotationType, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_annotations, validate_kwargs, validate_preprocess_arguments from ...utils import TensorType, is_flax_available, is_jax_tensor, is_scipy_available, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_vision_available, logging if is_torch_available(): import torch from torch import nn if is_vision_available(): import PIL if is_scipy_available(): import scipy.special import scipy.stats logger = logging.get_logger(__name__) SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: (height, width) = image_size raw_size = None if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: raw_size = max_size * min_original_size / max_original_size size = int(round(raw_size)) if height <= width and height == size or (width <= height and width == size): (oh, ow) = (height, width) elif width < height: ow = size if max_size is not None and raw_size is not None: oh = int(raw_size * height / width) else: oh = int(size * height / width) else: oh = size if max_size is not None and raw_size is not None: ow = int(raw_size * width / height) else: ow = int(size * width / height) return (oh, ow) def get_image_size_for_max_height_width(input_image: np.ndarray, max_height: int, max_width: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]: image_size = get_image_size(input_image, input_data_format) (height, width) = image_size height_scale = max_height / height width_scale = max_width / width min_scale = min(height_scale, width_scale) new_height = int(height * min_scale) new_width = int(width * min_scale) return (new_height, new_width) def get_resize_output_image_size(input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]: image_size = get_image_size(input_image, input_data_format) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) def get_numpy_to_framework_fn(arr) -> Callable: if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f'Cannot convert arrays of type {type(arr)}') def safe_squeeze(arr: 
np.ndarray, axis: Optional[int]=None) -> np.ndarray: if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: (image_height, image_width) = image_size norm_annotation = {} for (key, value) in annotation.items(): if key == 'boxes': boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation def max_across_indices(values: Iterable[Any]) -> List[Any]: return [max(values_i) for values_i in zip(*values)] def get_max_height_width(images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> List[int]: if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if input_data_format == ChannelDimension.FIRST: (_, max_height, max_width) = max_across_indices([img.shape for img in images]) elif input_data_format == ChannelDimension.LAST: (max_height, max_width, _) = max_across_indices([img.shape for img in images]) else: raise ValueError(f'Invalid channel dimension format: {input_data_format}') return (max_height, max_width) def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: (input_height, input_width) = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: try: from pycocotools import mask as coco_mask except ImportError: raise ImportError('Pycocotools is not installed in your environment.') masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool=False, input_data_format: Optional[Union[ChannelDimension, str]]=None): (image_height, image_width) = get_image_size(image, channel_dim=input_data_format) image_id = target['image_id'] image_id = np.asarray([image_id], dtype=np.int64) annotations = target['annotations'] annotations = [obj for obj in annotations if 'iscrowd' not in obj or obj['iscrowd'] == 0] classes = [obj['category_id'] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) area = np.asarray([obj['area'] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj['iscrowd'] if 'iscrowd' in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj['bbox'] for obj in annotations] boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target['image_id'] = image_id new_target['class_labels'] = classes[keep] new_target['boxes'] = boxes[keep] new_target['area'] = area[keep] new_target['iscrowd'] = iscrowd[keep] new_target['orig_size'] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if 
annotations and 'keypoints' in annotations[0]: keypoints = [obj['keypoints'] for obj in annotations] keypoints = np.asarray(keypoints, dtype=np.float32) keypoints = keypoints[keep] num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target['keypoints'] = keypoints if return_segmentation_masks: segmentation_masks = [obj['segmentation'] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target['masks'] = masks[keep] return new_target def masks_to_boxes(masks: np.ndarray) -> np.ndarray: if masks.size == 0: return np.zeros((0, 4)) (h, w) = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) (y, x) = np.meshgrid(y, x, indexing='ij') x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~np.array(masks, dtype=bool)) x_min = x.filled(fill_value=100000000.0) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~np.array(masks, dtype=bool)) y_min = y.filled(fill_value=100000000.0) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) def prepare_coco_panoptic_annotation(image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool=True, input_data_format: Union[ChannelDimension, str]=None) -> Dict: (image_height, image_width) = get_image_size(image, channel_dim=input_data_format) annotation_path = pathlib.Path(masks_path) / target['file_name'] new_target = {} new_target['image_id'] = np.asarray([target['image_id'] if 'image_id' in target else target['id']], dtype=np.int64) new_target['size'] = np.asarray([image_height, image_width], dtype=np.int64) new_target['orig_size'] = np.asarray([image_height, image_width], dtype=np.int64) if 'segments_info' in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info['id'] for segment_info in target['segments_info']]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target['masks'] = masks new_target['boxes'] = masks_to_boxes(masks) new_target['class_labels'] = np.array([segment_info['category_id'] for segment_info in target['segments_info']], dtype=np.int64) new_target['iscrowd'] = np.asarray([segment_info['iscrowd'] for segment_info in target['segments_info']], dtype=np.int64) new_target['area'] = np.asarray([segment_info['area'] for segment_info in target['segments_info']], dtype=np.float32) return new_target def get_segmentation_image(masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False): (h, w) = input_size (final_h, final_w) = target_size m_id = scipy.special.softmax(masks.transpose(0, 1), -1) if m_id.shape[-1] == 0: m_id = np.zeros((h, w), dtype=np.int64) else: m_id = m_id.argmax(-1).reshape(h, w) if deduplicate: for equiv in stuff_equiv_classes.values(): for eq_id in equiv: m_id[m_id == eq_id] = equiv[0] seg_img = id_to_rgb(m_id) seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST) return seg_img def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray: (final_h, final_w) = target_size np_seg_img = seg_img.astype(np.uint8) np_seg_img = np_seg_img.reshape(final_h, final_w, 3) m_id = rgb_to_id(np_seg_img) area = 
[(m_id == i).sum() for i in range(n_classes)] return area def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: probs = scipy.special.softmax(logits, axis=-1) labels = probs.argmax(-1, keepdims=True) scores = np.take_along_axis(probs, labels, axis=-1) (scores, labels) = (scores.squeeze(-1), labels.squeeze(-1)) return (scores, labels) def post_process_panoptic_sample(out_logits: np.ndarray, masks: np.ndarray, boxes: np.ndarray, processed_size: Tuple[int, int], target_size: Tuple[int, int], is_thing_map: Dict, threshold=0.85) -> Dict: (scores, labels) = score_labels_from_class_probabilities(out_logits) keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold) cur_scores = scores[keep] cur_classes = labels[keep] cur_boxes = center_to_corners_format(boxes[keep]) if len(cur_boxes) != len(cur_classes): raise ValueError('Not as many boxes as there are classes') cur_masks = masks[keep] cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR) cur_masks = safe_squeeze(cur_masks, 1) (b, h, w) = cur_masks.shape cur_masks = cur_masks.reshape(b, -1) stuff_equiv_classes = defaultdict(list) for (k, label) in enumerate(cur_classes): if not is_thing_map[label]: stuff_equiv_classes[label].append(k) seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores)) if cur_classes.size > 0: filtered_small = np.array([a <= 4 for a in area], dtype=bool) while filtered_small.any(): cur_masks = cur_masks[~filtered_small] cur_scores = cur_scores[~filtered_small] cur_classes = cur_classes[~filtered_small] seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores)) filtered_small = np.array([a <= 4 for a in area], dtype=bool) else: cur_classes = np.ones((1, 1), dtype=np.int64) segments_info = [{'id': i, 'isthing': is_thing_map[cat], 'category_id': int(cat), 'area': a} for (i, (cat, a)) in enumerate(zip(cur_classes, area))] del cur_classes with io.BytesIO() as out: PIL.Image.fromarray(seg_img).save(out, format='PNG') predictions = {'png_string': out.getvalue(), 'segments_info': segments_info} return predictions def resize_annotation(annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, resample: PILImageResampling=PILImageResampling.NEAREST): ratios = tuple((float(s) / float(s_orig) for (s, s_orig) in zip(target_size, orig_size))) (ratio_height, ratio_width) = ratios new_annotation = {} new_annotation['size'] = target_size for (key, value) in annotation.items(): if key == 'boxes': boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation['boxes'] = scaled_boxes elif key == 'area': area = value scaled_area = area * (ratio_width * ratio_height) new_annotation['area'] = scaled_area elif key == 'masks': masks = value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation['masks'] = masks elif key == 'size': new_annotation['size'] = target_size else: new_annotation[key] = value return new_annotation def binary_mask_to_rle(mask): if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = 
np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) def convert_segmentation_to_rle(segmentation): segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): if not masks.shape[0] == scores.shape[0] == labels.shape[0]: raise ValueError('mask, scores and labels must have the same shape!') to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return (masks[to_keep], scores[to_keep], labels[to_keep]) def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): mask_k = mask_labels == k mask_k_area = mask_k.sum() original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return (mask_exists, mask_k) def compute_segments(mask_probs, pred_scores, pred_labels, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[Set[int]]=None, target_size: Tuple[int, int]=None): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate(mask_probs.unsqueeze(0), size=target_size, mode='bilinear', align_corners=False)[0] current_segment_id = 0 mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse (mask_exists, mask_k) = check_segment_validity(mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append({'id': current_segment_id, 'label_id': pred_class, 'was_fused': should_fuse, 'score': segment_score}) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return (segmentation, segments) class DetrImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values', 'pixel_mask'] def __init__(self, format: Union[str, AnnotationFormat]=AnnotationFormat.COCO_DETECTION, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Union[float, List[float]]=None, image_std: Union[float, List[float]]=None, do_convert_annotations: Optional[bool]=None, do_pad: bool=True, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> None: if 'pad_and_return_pixel_mask' in kwargs: do_pad = kwargs.pop('pad_and_return_pixel_mask') if 'max_size' in kwargs: logger.warning_once("The `max_size` parameter is deprecated and will be removed in v4.26. 
Please specify in `size['longest_edge'] instead`.") max_size = kwargs.pop('max_size') else: max_size = None if size is None else 1333 size = size if size is not None else {'shortest_edge': 800, 'longest_edge': 1333} size = get_size_dict(size, max_size=max_size, default_to_square=False) if do_convert_annotations is None: do_convert_annotations = do_normalize super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.do_convert_annotations = do_convert_annotations self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad self.pad_size = pad_size self._valid_processor_keys = ['images', 'annotations', 'return_segmentation_masks', 'masks_path', 'do_resize', 'size', 'resample', 'do_rescale', 'rescale_factor', 'do_normalize', 'do_convert_annotations', 'image_mean', 'image_std', 'do_pad', 'pad_size', 'format', 'return_tensors', 'data_format', 'input_data_format'] @classmethod def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): image_processor_dict = image_processor_dict.copy() if 'max_size' in kwargs: image_processor_dict['max_size'] = kwargs.pop('max_size') if 'pad_and_return_pixel_mask' in kwargs: image_processor_dict['pad_and_return_pixel_mask'] = kwargs.pop('pad_and_return_pixel_mask') return super().from_dict(image_processor_dict, **kwargs) def prepare_annotation(self, image: np.ndarray, target: Dict, format: Optional[AnnotationFormat]=None, return_segmentation_masks: bool=None, masks_path: Optional[Union[str, pathlib.Path]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Dict: format = format if format is not None else self.format if format == AnnotationFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation(image, target, return_segmentation_masks, input_data_format=input_data_format) elif format == AnnotationFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation(image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format) else: raise ValueError(f'Format {format} is not supported.') return target def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: if 'max_size' in kwargs: logger.warning_once("The `max_size` parameter is deprecated and will be removed in v4.26. 
Please specify in `size['longest_edge'] instead`.") max_size = kwargs.pop('max_size') else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if 'shortest_edge' in size and 'longest_edge' in size: new_size = get_resize_output_image_size(image, size['shortest_edge'], size['longest_edge'], input_data_format=input_data_format) elif 'max_height' in size and 'max_width' in size: new_size = get_image_size_for_max_height_width(image, size['max_height'], size['max_width'], input_data_format=input_data_format) elif 'height' in size and 'width' in size: new_size = (size['height'], size['width']) else: raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.") image = resize(image, size=new_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) return image def resize_annotation(self, annotation, orig_size, size, resample: PILImageResampling=PILImageResampling.NEAREST) -> Dict: return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: return normalize_annotation(annotation, image_size=image_size) def _update_annotation_for_padded_image(self, annotation: Dict, input_image_size: Tuple[int, int], output_image_size: Tuple[int, int], padding, update_bboxes) -> Dict: new_annotation = {} new_annotation['size'] = output_image_size for (key, value) in annotation.items(): if key == 'masks': masks = value masks = pad(masks, padding, mode=PaddingMode.CONSTANT, constant_values=0, input_data_format=ChannelDimension.FIRST) masks = safe_squeeze(masks, 1) new_annotation['masks'] = masks elif key == 'boxes' and update_bboxes: boxes = value boxes *= np.asarray([input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0], input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0]]) new_annotation['boxes'] = boxes elif key == 'size': new_annotation['size'] = output_image_size else: new_annotation[key] = value return new_annotation def _pad_image(self, image: np.ndarray, output_size: Tuple[int, int], annotation: Optional[Dict[str, Any]]=None, constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True) -> np.ndarray: (input_height, input_width) = get_image_size(image, channel_dim=input_data_format) (output_height, output_width) = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) if annotation is not None: annotation = self._update_annotation_for_padded_image(annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes) return (padded_image, annotation) def pad(self, images: List[np.ndarray], annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, constant_values: Union[float, Iterable[float]]=0, 
return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True, pad_size: Optional[Dict[str, int]]=None) -> BatchFeature: pad_size = pad_size if pad_size is not None else self.pad_size if pad_size is not None: padded_size = (pad_size['height'], pad_size['width']) else: padded_size = get_max_height_width(images, input_data_format=input_data_format) annotation_list = annotations if annotations is not None else [None] * len(images) padded_images = [] padded_annotations = [] for (image, annotation) in zip(images, annotation_list): (padded_image, padded_annotation) = self._pad_image(image, padded_size, annotation, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, update_bboxes=update_bboxes) padded_images.append(padded_image) padded_annotations.append(padded_annotation) data = {'pixel_values': padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images] data['pixel_mask'] = masks encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations] return encoded_inputs def preprocess(self, images: ImageInput, annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, return_segmentation_masks: bool=None, masks_path: Optional[Union[str, pathlib.Path]]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, do_convert_annotations: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_pad: Optional[bool]=None, format: Optional[Union[str, AnnotationFormat]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> BatchFeature: if 'pad_and_return_pixel_mask' in kwargs: logger.warning_once('The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, use `do_pad` instead.') do_pad = kwargs.pop('pad_and_return_pixel_mask') max_size = None if 'max_size' in kwargs: logger.warning_once("The `max_size` argument is deprecated and will be removed in a future version, use `size['longest_edge']` instead.") size = kwargs.pop('max_size') do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, max_size=max_size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_convert_annotations = self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations do_pad = self.do_pad if do_pad is None else do_pad pad_size = self.pad_size if 
pad_size is None else pad_size format = self.format if format is None else format images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample) if annotations is not None and isinstance(annotations, dict): annotations = [annotations] if annotations is not None and len(images) != len(annotations): raise ValueError(f'The number of images ({len(images)}) and annotations ({len(annotations)}) do not match.') format = AnnotationFormat(format) if annotations is not None: validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations) if masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and (not isinstance(masks_path, (pathlib.Path, str))): raise ValueError(f'The path to the directory containing the mask PNG files should be provided as a `pathlib.Path` or string object, but is {type(masks_path)} instead.') images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if annotations is not None: prepared_images = [] prepared_annotations = [] for (image, target) in zip(images, annotations): target = self.prepare_annotation(image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=input_data_format) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations if do_resize: if annotations is not None: (resized_images, resized_annotations) = ([], []) for (image, target) in zip(images, annotations): orig_size = get_image_size(image, input_data_format) resized_image = self.resize(image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format) resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image, input_data_format)) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images] if do_convert_annotations and annotations is not None: annotations = [self.normalize_annotation(annotation, get_image_size(image, input_data_format)) for (annotation, image) in zip(annotations, images)] if do_pad: encoded_inputs = self.pad(images, annotations=annotations, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format, update_bboxes=do_convert_annotations, return_tensors=return_tensors, pad_size=pad_size) else: 
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] encoded_inputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors) if annotations is not None: encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations] return encoded_inputs def post_process(self, outputs, target_sizes): logger.warning_once('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.') (out_logits, out_bbox) = (outputs.logits, outputs.pred_boxes) if len(out_logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') if target_sizes.shape[1] != 2: raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch') prob = nn.functional.softmax(out_logits, -1) (scores, labels) = prob[..., :-1].max(-1) boxes = center_to_corners_format(out_bbox) (img_h, img_w) = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [{'scores': s, 'labels': l, 'boxes': b} for (s, l, b) in zip(scores, labels, boxes)] return results def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): logger.warning_once('`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use `post_process_semantic_segmentation`.') (out_logits, raw_masks) = (outputs.logits, outputs.pred_masks) empty_label = out_logits.shape[-1] - 1 preds = [] def to_tuple(tup): if isinstance(tup, tuple): return tup return tuple(tup.cpu().tolist()) for (cur_logits, cur_masks, size) in zip(out_logits, raw_masks, target_sizes): (cur_scores, cur_labels) = cur_logits.softmax(-1).max(-1) keep = cur_labels.ne(empty_label) & (cur_scores > threshold) cur_scores = cur_scores[keep] cur_labels = cur_labels[keep] cur_masks = cur_masks[keep] cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode='bilinear').squeeze(1) cur_masks = (cur_masks.sigmoid() > mask_threshold) * 1 predictions = {'scores': cur_scores, 'labels': cur_labels, 'masks': cur_masks} preds.append(predictions) return preds def post_process_instance(self, results, outputs, orig_target_sizes, max_target_sizes, threshold=0.5): logger.warning_once('`post_process_instance` is deprecated and will be removed in v5 of Transformers, please use `post_process_instance_segmentation`.') if len(orig_target_sizes) != len(max_target_sizes): raise ValueError('Make sure to pass in as many orig_target_sizes as max_target_sizes') (max_h, max_w) = max_target_sizes.max(0)[0].tolist() outputs_masks = outputs.pred_masks.squeeze(2) outputs_masks = nn.functional.interpolate(outputs_masks, size=(max_h, max_w), mode='bilinear', align_corners=False) outputs_masks = (outputs_masks.sigmoid() > threshold).cpu() for (i, (cur_mask, t, tt)) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)): (img_h, img_w) = (t[0], t[1]) results[i]['masks'] = cur_mask[:, :img_h, :img_w].unsqueeze(1) results[i]['masks'] = nn.functional.interpolate(results[i]['masks'].float(), size=tuple(tt.tolist()), mode='nearest').byte() return results def post_process_panoptic(self, outputs, processed_sizes, target_sizes=None, is_thing_map=None, threshold=0.85): logger.warning_once('`post_process_panoptic is deprecated and will be removed 
in v5 of Transformers, please use `post_process_panoptic_segmentation`.') if target_sizes is None: target_sizes = processed_sizes if len(processed_sizes) != len(target_sizes): raise ValueError('Make sure to pass in as many processed_sizes as target_sizes') if is_thing_map is None: is_thing_map = {i: i <= 90 for i in range(201)} (out_logits, raw_masks, raw_boxes) = (outputs.logits, outputs.pred_masks, outputs.pred_boxes) if not len(out_logits) == len(raw_masks) == len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits and masks') empty_label = out_logits.shape[-1] - 1 preds = [] def to_tuple(tup): if isinstance(tup, tuple): return tup return tuple(tup.cpu().tolist()) for (cur_logits, cur_masks, cur_boxes, size, target_size) in zip(out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes): (cur_scores, cur_labels) = cur_logits.softmax(-1).max(-1) keep = cur_labels.ne(empty_label) & (cur_scores > threshold) cur_scores = cur_scores[keep] cur_labels = cur_labels[keep] cur_masks = cur_masks[keep] cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode='bilinear').squeeze(1) cur_boxes = center_to_corners_format(cur_boxes[keep]) (h, w) = cur_masks.shape[-2:] if len(cur_boxes) != len(cur_labels): raise ValueError('Not as many boxes as there are classes') cur_masks = cur_masks.flatten(1) stuff_equiv_classes = defaultdict(lambda : []) for (k, label) in enumerate(cur_labels): if not is_thing_map[label.item()]: stuff_equiv_classes[label.item()].append(k) def get_ids_area(masks, scores, dedup=False): m_id = masks.transpose(0, 1).softmax(-1) if m_id.shape[-1] == 0: m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) else: m_id = m_id.argmax(-1).view(h, w) if dedup: for equiv in stuff_equiv_classes.values(): if len(equiv) > 1: for eq_id in equiv: m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) (final_h, final_w) = to_tuple(target_size) seg_img = PIL.Image.fromarray(id_to_rgb(m_id.view(h, w).cpu().numpy())) seg_img = seg_img.resize(size=(final_w, final_h), resample=PILImageResampling.NEAREST) np_seg_img = torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())) np_seg_img = np_seg_img.view(final_h, final_w, 3) np_seg_img = np_seg_img.numpy() m_id = torch.from_numpy(rgb_to_id(np_seg_img)) area = [] for i in range(len(scores)): area.append(m_id.eq(i).sum().item()) return (area, seg_img) (area, seg_img) = get_ids_area(cur_masks, cur_scores, dedup=True) if cur_labels.numel() > 0: while True: filtered_small = torch.as_tensor([area[i] <= 4 for (i, c) in enumerate(cur_labels)], dtype=torch.bool, device=keep.device) if filtered_small.any().item(): cur_scores = cur_scores[~filtered_small] cur_labels = cur_labels[~filtered_small] cur_masks = cur_masks[~filtered_small] (area, seg_img) = get_ids_area(cur_masks, cur_scores) else: break else: cur_labels = torch.ones(1, dtype=torch.long, device=cur_labels.device) segments_info = [] for (i, a) in enumerate(area): cat = cur_labels[i].item() segments_info.append({'id': i, 'isthing': is_thing_map[cat], 'category_id': cat, 'area': a}) del cur_labels with io.BytesIO() as out: seg_img.save(out, format='PNG') predictions = {'png_string': out.getvalue(), 'segments_info': segments_info} preds.append(predictions) return preds def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None): (out_logits, out_bbox) = (outputs.logits, outputs.pred_boxes) if target_sizes is not None: if len(out_logits) != 
len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') prob = nn.functional.softmax(out_logits, -1) (scores, labels) = prob[..., :-1].max(-1) boxes = center_to_corners_format(out_bbox) if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: (img_h, img_w) = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for (s, l, b) in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({'scores': score, 'labels': label, 'boxes': box}) return results def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]]=None): class_queries_logits = outputs.logits masks_queries_logits = outputs.pred_masks masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') semantic_segmentation = [] for idx in range(batch_size): resized_logits = nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, target_sizes: Optional[List[Tuple[int, int]]]=None, return_coco_annotation: Optional[bool]=False) -> List[Dict]: class_queries_logits = outputs.logits masks_queries_logits = outputs.pred_masks batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() (pred_scores, pred_labels) = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) results: List[Dict[str, TensorType]] = [] for i in range(batch_size): (mask_probs_item, pred_scores_item, pred_labels_item) = remove_low_and_no_objects(mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels) if mask_probs_item.shape[0] <= 0: (height, width) = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({'segmentation': segmentation, 'segments_info': []}) continue target_size = target_sizes[i] if target_sizes is not None else None (segmentation, segments) = compute_segments(mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=[], target_size=target_size) if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({'segmentation': segmentation, 'segments_info': segments}) return results def post_process_panoptic_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: 
Optional[Set[int]]=None, target_sizes: Optional[List[Tuple[int, int]]]=None) -> List[Dict]: if label_ids_to_fuse is None: logger.warning_once('`label_ids_to_fuse` unset. No instance will be fused.') label_ids_to_fuse = set() class_queries_logits = outputs.logits masks_queries_logits = outputs.pred_masks batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() (pred_scores, pred_labels) = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) results: List[Dict[str, TensorType]] = [] for i in range(batch_size): (mask_probs_item, pred_scores_item, pred_labels_item) = remove_low_and_no_objects(mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels) if mask_probs_item.shape[0] <= 0: (height, width) = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({'segmentation': segmentation, 'segments_info': []}) continue target_size = target_sizes[i] if target_sizes is not None else None (segmentation, segments) = compute_segments(mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size) results.append({'segmentation': segmentation, 'segments_info': segments}) return results # File: transformers-main/src/transformers/models/detr/modeling_detr.py """""" import math from dataclasses import dataclass from typing import Dict, List, Optional, Tuple, Union import torch from torch import Tensor, nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_accelerate_available, is_scipy_available, is_timm_available, is_vision_available, logging, replace_return_docstrings, requires_backends from ...utils.backbone_utils import load_backbone from .configuration_detr import DetrConfig if is_accelerate_available(): from accelerate import PartialState from accelerate.utils import reduce if is_scipy_available(): from scipy.optimize import linear_sum_assignment if is_timm_available(): from timm import create_model if is_vision_available(): from transformers.image_transforms import center_to_corners_format logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'DetrConfig' _CHECKPOINT_FOR_DOC = 'facebook/detr-resnet-50' @dataclass class DetrDecoderOutput(BaseModelOutputWithCrossAttentions): intermediate_hidden_states: Optional[torch.FloatTensor] = None @dataclass class DetrModelOutput(Seq2SeqModelOutput): intermediate_hidden_states: Optional[torch.FloatTensor] = None @dataclass class DetrObjectDetectionOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None 
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class DetrSegmentationOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None pred_masks: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None class DetrFrozenBatchNorm2d(nn.Module): def __init__(self, n): super().__init__() self.register_buffer('weight', torch.ones(n)) self.register_buffer('bias', torch.zeros(n)) self.register_buffer('running_mean', torch.zeros(n)) self.register_buffer('running_var', torch.ones(n)) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): num_batches_tracked_key = prefix + 'num_batches_tracked' if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x): weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-05 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias def replace_batch_norm(model): for (name, module) in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = DetrFrozenBatchNorm2d(module.num_features) if not module.weight.device == torch.device('meta'): new_module.weight.data.copy_(module.weight) new_module.bias.data.copy_(module.bias) new_module.running_mean.data.copy_(module.running_mean) new_module.running_var.data.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module) class DetrConvEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config if config.use_timm_backbone: requires_backends(self, ['timm']) kwargs = getattr(config, 'backbone_kwargs', {}) kwargs = {} if kwargs is None else kwargs.copy() out_indices = kwargs.pop('out_indices', (1, 2, 3, 4)) num_channels = kwargs.pop('in_chans', config.num_channels) if config.dilation: kwargs['output_stride'] = kwargs.get('output_stride', 16) backbone = create_model(config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=out_indices, in_chans=num_channels, **kwargs) else: backbone = load_backbone(config) with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels backbone_model_type = None if config.backbone is not None: backbone_model_type = config.backbone elif config.backbone_config is not None: backbone_model_type = config.backbone_config.model_type else: raise ValueError('Either `backbone` or `backbone_config` should be provided in the config') if 'resnet' in backbone_model_type: for (name, parameter) in self.model.named_parameters(): if config.use_timm_backbone: if 'layer2' not 
in name and 'layer3' not in name and ('layer4' not in name): parameter.requires_grad_(False) elif 'stage.1' not in name and 'stage.2' not in name and ('stage.3' not in name): parameter.requires_grad_(False) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps out = [] for feature_map in features: mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out class DetrConvModel(nn.Module): def __init__(self, conv_encoder, position_embedding): super().__init__() self.conv_encoder = conv_encoder self.position_embedding = position_embedding def forward(self, pixel_values, pixel_mask): out = self.conv_encoder(pixel_values, pixel_mask) pos = [] for (feature_map, mask) in out: pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) return (out, pos) class DetrSinePositionEmbedding(nn.Module): def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError('normalize should be True if scale is passed') if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError('No pixel mask provided') y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: y_embed = y_embed / (y_embed[:, -1:, :] + 1e-06) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + 1e-06) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float() dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.embedding_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos class DetrLearnedPositionEmbedding(nn.Module): def __init__(self, embedding_dim=256): super().__init__() self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): (height, width) = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos def build_position_encoding(config): n_steps = config.d_model // 2 if config.position_embedding_type == 'sine': position_embedding = DetrSinePositionEmbedding(n_steps, normalize=True) elif config.position_embedding_type == 'learned': position_embedding = DetrLearnedPositionEmbedding(n_steps) else: raise ValueError(f'Not supported {config.position_embedding_type}') return position_embedding class DetrAttention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, bias: bool=True): 
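# Descriptive comments added by the editor (not in the original source): `DetrAttention` is the
# standard multi-head attention from "Attention Is All You Need", with the DETR-specific detail
# that position embeddings (object queries / spatial embeddings) are added to the queries and keys
# at every layer via `with_pos_embed`, while the values are projected from the un-augmented hidden
# states. Illustrative numbers only: embed_dim=256 with num_heads=8 gives head_dim=32 and
# scaling = 32 ** -0.5.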
super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor]): return tensor if object_queries is None else tensor + object_queries def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, spatial_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (batch_size, target_len, embed_dim) = hidden_states.size() if object_queries is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, object_queries) if spatial_position_embeddings is not None: key_value_states_original = key_value_states key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings) query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) else: key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError(f'Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError(f'Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, 
self.head_dim): raise ValueError(f'`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped) class DetrEncoderLayer(nn.Module): def __init__(self, config: DetrConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DetrAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: torch.Tensor=None, output_attentions: bool=False): residual = hidden_states (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class DetrDecoderLayer(nn.Module): def __init__(self, config: DetrConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DetrAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = DetrAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False): residual = hidden_states (hidden_states, self_attn_weights) = 
self.self_attn(hidden_states=hidden_states, object_queries=query_position_embeddings, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states (hidden_states, cross_attn_weights) = self.encoder_attn(hidden_states=hidden_states, object_queries=query_position_embeddings, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, spatial_position_embeddings=object_queries, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs class DetrPreTrainedModel(PreTrainedModel): config_class = DetrConfig base_model_prefix = 'model' main_input_name = 'pixel_values' _no_split_modules = ['DetrConvEncoder', 'DetrEncoderLayer', 'DetrDecoderLayer'] def _init_weights(self, module): std = self.config.init_std xavier_std = self.config.init_xavier_std if isinstance(module, DetrMHAttentionMap): nn.init.zeros_(module.k_linear.bias) nn.init.zeros_(module.q_linear.bias) nn.init.xavier_uniform_(module.k_linear.weight, gain=xavier_std) nn.init.xavier_uniform_(module.q_linear.weight, gain=xavier_std) elif isinstance(module, DetrLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() DETR_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`DetrConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' DETR_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. 
Padding will be ignored by default should you provide it.\n\n Pixel values can be obtained using [`AutoImageProcessor`]. See [`DetrImageProcessor.__call__`] for details.\n\n pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):\n Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n\n [What are attention masks?](../glossary#attention-mask)\n\n decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):\n Not used by default. Can be used to mask object queries.\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you\n can choose to directly pass a flattened representation of an image.\n decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):\n Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an\n embedded representation.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class DetrEncoder(DetrPreTrainedModel): def __init__(self, config: DetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.layers = nn.ModuleList([DetrEncoderLayer(config) for _ in range(config.encoder_layers)]) self.post_init() def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for (i, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class DetrDecoder(DetrPreTrainedModel): def __init__(self, config: DetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.layers = nn.ModuleList([DetrDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def forward(self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds input_shape = inputs_embeds.size()[:-1] combined_attention_mask = None if attention_mask is not None and combined_attention_mask is not None: combined_attention_mask = combined_attention_mask + _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, 
tgt_len=input_shape[-1]) intermediate = () if self.config.auxiliary_loss else None all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, combined_attention_mask, encoder_hidden_states, encoder_attention_mask, None) else: layer_outputs = decoder_layer(hidden_states, attention_mask=combined_attention_mask, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if self.config.auxiliary_loss: hidden_states = self.layernorm(hidden_states) intermediate += (hidden_states,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layernorm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if self.config.auxiliary_loss: intermediate = torch.stack(intermediate) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate] if v is not None)) return DetrDecoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, intermediate_hidden_states=intermediate) @add_start_docstrings('\n The bare DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without\n any specific head on top.\n ', DETR_START_DOCSTRING) class DetrModel(DetrPreTrainedModel): def __init__(self, config: DetrConfig): super().__init__(config) backbone = DetrConvEncoder(config) object_queries = build_position_encoding(config) self.backbone = DetrConvModel(backbone, object_queries) self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1) self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model) self.encoder = DetrEncoder(config) self.decoder = DetrDecoder(config) self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_backbone(self): for (name, param) in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for (name, param) in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) @add_start_docstrings_to_model_forward(DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DetrModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], DetrModelOutput]: output_attentions = 
output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict (batch_size, num_channels, height, width) = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), device=device) (features, object_queries_list) = self.backbone(pixel_values, pixel_mask) (feature_map, mask) = features[-1] if mask is None: raise ValueError('Backbone does not return downsampled pixel mask') projected_feature_map = self.input_projection(feature_map) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1) flattened_mask = mask.flatten(1) if encoder_outputs is None: encoder_outputs = self.encoder(inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1) queries = torch.zeros_like(query_position_embeddings) decoder_outputs = self.decoder(inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return decoder_outputs + encoder_outputs return DetrModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states) @add_start_docstrings('\n DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks\n such as COCO detection.\n ', DETR_START_DOCSTRING) class DetrForObjectDetection(DetrPreTrainedModel): def __init__(self, config: DetrConfig): super().__init__(config) self.model = DetrModel(config) self.class_labels_classifier = nn.Linear(config.d_model, config.num_labels + 1) self.bbox_predictor = DetrMLPPredictionHead(input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3) self.post_init() @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): return [{'logits': a, 'pred_boxes': b} for (a, b) in zip(outputs_class[:-1], outputs_coord[:-1])] @add_start_docstrings_to_model_forward(DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: 
Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[List[dict]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], DetrObjectDetectionOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.class_labels_classifier(sequence_output) pred_boxes = self.bbox_predictor(sequence_output).sigmoid() (loss, loss_dict, auxiliary_outputs) = (None, None, None) if labels is not None: matcher = DetrHungarianMatcher(class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost) losses = ['labels', 'boxes', 'cardinality'] criterion = DetrLoss(matcher=matcher, num_classes=self.config.num_labels, eos_coef=self.config.eos_coefficient, losses=losses) criterion.to(self.device) outputs_loss = {} outputs_loss['logits'] = logits outputs_loss['pred_boxes'] = pred_boxes if self.config.auxiliary_loss: intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] outputs_class = self.class_labels_classifier(intermediate) outputs_coord = self.bbox_predictor(intermediate).sigmoid() auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss['auxiliary_outputs'] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) weight_dict = {'loss_ce': 1, 'loss_bbox': self.config.bbox_loss_coefficient} weight_dict['loss_giou'] = self.config.giou_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f'_{i}': v for (k, v) in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum((loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs return (loss, loss_dict) + output if loss is not None else output return DetrObjectDetectionOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) @add_start_docstrings('\n DETR Model (consisting of a backbone and encoder-decoder Transformer) with a segmentation head on top, for tasks\n such as COCO panoptic.\n\n ', DETR_START_DOCSTRING) class DetrForSegmentation(DetrPreTrainedModel): def __init__(self, config: DetrConfig): super().__init__(config) self.detr = DetrForObjectDetection(config) (hidden_size, number_of_heads) = (config.d_model, config.encoder_attention_heads) intermediate_channel_sizes = self.detr.model.backbone.conv_encoder.intermediate_channel_sizes self.mask_head = DetrMaskHeadSmallConv(hidden_size + number_of_heads, 
intermediate_channel_sizes[::-1][-3:], hidden_size) self.bbox_attention = DetrMHAttentionMap(hidden_size, hidden_size, number_of_heads, dropout=0.0, std=config.init_xavier_std) self.post_init() @add_start_docstrings_to_model_forward(DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DetrSegmentationOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[List[dict]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], DetrSegmentationOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict (batch_size, num_channels, height, width) = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), device=device) (features, object_queries_list) = self.detr.model.backbone(pixel_values, pixel_mask=pixel_mask) (feature_map, mask) = features[-1] (batch_size, num_channels, height, width) = feature_map.shape projected_feature_map = self.detr.model.input_projection(feature_map) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1) flattened_mask = mask.flatten(1) if encoder_outputs is None: encoder_outputs = self.detr.model.encoder(inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) query_position_embeddings = self.detr.model.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1) queries = torch.zeros_like(query_position_embeddings) decoder_outputs = self.detr.model.decoder(inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = decoder_outputs[0] logits = self.detr.class_labels_classifier(sequence_output) pred_boxes = self.detr.bbox_predictor(sequence_output).sigmoid() memory = encoder_outputs[0].permute(0, 2, 1).view(batch_size, self.config.d_model, height, width) mask = flattened_mask.view(batch_size, height, width) bbox_mask = self.bbox_attention(sequence_output, memory, mask=~mask) seg_masks = self.mask_head(projected_feature_map, bbox_mask, [features[2][0], features[1][0], features[0][0]]) pred_masks = seg_masks.view(batch_size, self.detr.config.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]) (loss, loss_dict, auxiliary_outputs) = (None, None, None) if labels is not None: matcher = DetrHungarianMatcher(class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost) losses = ['labels', 'boxes', 'cardinality', 'masks'] criterion = 
DetrLoss(matcher=matcher, num_classes=self.config.num_labels, eos_coef=self.config.eos_coefficient, losses=losses) criterion.to(self.device) outputs_loss = {} outputs_loss['logits'] = logits outputs_loss['pred_boxes'] = pred_boxes outputs_loss['pred_masks'] = pred_masks if self.config.auxiliary_loss: intermediate = decoder_outputs.intermediate_hidden_states if return_dict else decoder_outputs[-1] outputs_class = self.detr.class_labels_classifier(intermediate) outputs_coord = self.detr.bbox_predictor(intermediate).sigmoid() auxiliary_outputs = self.detr._set_aux_loss(outputs_class, outputs_coord) outputs_loss['auxiliary_outputs'] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) weight_dict = {'loss_ce': 1, 'loss_bbox': self.config.bbox_loss_coefficient} weight_dict['loss_giou'] = self.config.giou_loss_coefficient weight_dict['loss_mask'] = self.config.mask_loss_coefficient weight_dict['loss_dice'] = self.config.dice_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f'_{i}': v for (k, v) in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum((loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes, pred_masks) + auxiliary_outputs + decoder_outputs + encoder_outputs else: output = (logits, pred_boxes, pred_masks) + decoder_outputs + encoder_outputs return (loss, loss_dict) + output if loss is not None else output return DetrSegmentationOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, pred_masks=pred_masks, auxiliary_outputs=auxiliary_outputs, last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def _expand(tensor, length: int): return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) class DetrMaskHeadSmallConv(nn.Module): def __init__(self, dim, fpn_dims, context_dim): super().__init__() if dim % 8 != 0: raise ValueError('The hidden_size + number of attention heads must be divisible by 8 as the number of groups in GroupNorm is set to 8') inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] self.lay1 = nn.Conv2d(dim, dim, 3, padding=1) self.gn1 = nn.GroupNorm(8, dim) self.lay2 = nn.Conv2d(dim, inter_dims[1], 3, padding=1) self.gn2 = nn.GroupNorm(min(8, inter_dims[1]), inter_dims[1]) self.lay3 = nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) self.gn3 = nn.GroupNorm(min(8, inter_dims[2]), inter_dims[2]) self.lay4 = nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) self.gn4 = nn.GroupNorm(min(8, inter_dims[3]), inter_dims[3]) self.lay5 = nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) self.gn5 = nn.GroupNorm(min(8, inter_dims[4]), inter_dims[4]) self.out_lay = nn.Conv2d(inter_dims[4], 1, 3, padding=1) self.dim = dim self.adapter1 = nn.Conv2d(fpn_dims[0], inter_dims[1], 1) self.adapter2 = nn.Conv2d(fpn_dims[1], inter_dims[2], 1) self.adapter3 = nn.Conv2d(fpn_dims[2], inter_dims[3], 1) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_uniform_(m.weight, a=1) nn.init.constant_(m.bias, 0) def forward(self, x: Tensor, bbox_mask: 
Tensor, fpns: List[Tensor]): x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) x = self.lay1(x) x = self.gn1(x) x = nn.functional.relu(x) x = self.lay2(x) x = self.gn2(x) x = nn.functional.relu(x) cur_fpn = self.adapter1(fpns[0]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest') x = self.lay3(x) x = self.gn3(x) x = nn.functional.relu(x) cur_fpn = self.adapter2(fpns[1]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest') x = self.lay4(x) x = self.gn4(x) x = nn.functional.relu(x) cur_fpn = self.adapter3(fpns[2]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest') x = self.lay5(x) x = self.gn5(x) x = nn.functional.relu(x) x = self.out_lay(x) return x class DetrMHAttentionMap(nn.Module): def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None): super().__init__() self.num_heads = num_heads self.hidden_dim = hidden_dim self.dropout = nn.Dropout(dropout) self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.normalize_fact = float(hidden_dim / self.num_heads) ** (-0.5) def forward(self, q, k, mask: Optional[Tensor]=None): q = self.q_linear(q) k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) weights = torch.einsum('bqnc,bnchw->bqnhw', queries_per_head * self.normalize_fact, keys_per_head) if mask is not None: weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min) weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size()) weights = self.dropout(weights) return weights def dice_loss(inputs, targets, num_boxes): inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float=0.25, gamma: float=2): prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction='none') p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * (1 - p_t) ** gamma if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class DetrLoss(nn.Module): def __init__(self, matcher, num_classes, eos_coef, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.eos_coef = eos_coef self.losses = losses empty_weight = torch.ones(self.num_classes + 1) empty_weight[-1] = self.eos_coef self.register_buffer('empty_weight', empty_weight) def loss_labels(self, outputs, targets, indices, num_boxes): if 'logits' not in outputs: raise KeyError('No logits were found in the outputs') source_logits = outputs['logits'] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t['class_labels'][J] for (t, (_, J)) in zip(targets, indices)]) target_classes = 
torch.full(source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device) target_classes[idx] = target_classes_o loss_ce = nn.functional.cross_entropy(source_logits.transpose(1, 2), target_classes, self.empty_weight) losses = {'loss_ce': loss_ce} return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): logits = outputs['logits'] device = logits.device target_lengths = torch.as_tensor([len(v['class_labels']) for v in targets], device=device) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): if 'pred_boxes' not in outputs: raise KeyError('No predicted boxes found in outputs') idx = self._get_source_permutation_idx(indices) source_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for (t, (_, i)) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): if 'pred_masks' not in outputs: raise KeyError('No predicted masks found in outputs') source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs['pred_masks'] source_masks = source_masks[source_idx] masks = [t['masks'] for t in targets] (target_masks, valid) = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] source_masks = nn.functional.interpolate(source_masks[:, None], size=target_masks.shape[-2:], mode='bilinear', align_corners=False) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = {'loss_mask': sigmoid_focal_loss(source_masks, target_masks, num_boxes), 'loss_dice': dice_loss(source_masks, target_masks, num_boxes)} return losses def _get_source_permutation_idx(self, indices): batch_idx = torch.cat([torch.full_like(source, i) for (i, (source, _)) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return (batch_idx, source_idx) def _get_target_permutation_idx(self, indices): batch_idx = torch.cat([torch.full_like(target, i) for (i, (_, target)) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return (batch_idx, target_idx) def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = {'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks} if loss not in loss_map: raise ValueError(f'Loss {loss} not supported') return loss_map[loss](outputs, targets, indices, num_boxes) def forward(self, outputs, targets): outputs_without_aux = {k: v for (k, v) in outputs.items() if k != 'auxiliary_outputs'} indices = self.matcher(outputs_without_aux, targets) num_boxes = sum((len(t['class_labels']) for t in targets)) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) world_size = 1 if is_accelerate_available(): if PartialState._shared_state != {}: num_boxes = 
reduce(num_boxes) world_size = PartialState().num_processes num_boxes = torch.clamp(num_boxes / world_size, min=1).item() losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) if 'auxiliary_outputs' in outputs: for (i, auxiliary_outputs) in enumerate(outputs['auxiliary_outputs']): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == 'masks': continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f'_{i}': v for (k, v) in l_dict.items()} losses.update(l_dict) return losses class DetrMLPPredictionHead(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList((nn.Linear(n, k) for (n, k) in zip([input_dim] + h, h + [output_dim]))) def forward(self, x): for (i, layer) in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x class DetrHungarianMatcher(nn.Module): def __init__(self, class_cost: float=1, bbox_cost: float=1, giou_cost: float=1): super().__init__() requires_backends(self, ['scipy']) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and (giou_cost == 0): raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): (batch_size, num_queries) = outputs['logits'].shape[:2] out_prob = outputs['logits'].flatten(0, 1).softmax(-1) out_bbox = outputs['pred_boxes'].flatten(0, 1) target_ids = torch.cat([v['class_labels'] for v in targets]) target_bbox = torch.cat([v['boxes'] for v in targets]) class_cost = -out_prob[:, target_ids] bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v['boxes']) for v in targets] indices = [linear_sum_assignment(c[i]) for (i, c) in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for (i, j) in indices] def _upcast(t: Tensor) -> Tensor: if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() def box_area(boxes: Tensor) -> Tensor: boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (right_bottom - left_top).clamp(min=0) inter = width_height[:, :, 0] * width_height[:, :, 1] union = area1[:, None] + area2 - inter iou = inter / union return (iou, union) def generalized_box_iou(boxes1, boxes2): if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f'boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}') if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f'boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}') (iou, union) = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) 
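# Descriptive comments added by the editor (not in the original source): `top_left` and
# `bottom_right` span C, the smallest axis-aligned box enclosing each pair of boxes. Below,
# `area` is |C| and the return value is the generalized IoU, GIoU = IoU - (|C| - |A ∪ B|) / |C|,
# which lies in (-1, 1]. Illustrative example: for disjoint unit boxes A = [0, 0, 1, 1] and
# B = [2, 0, 3, 1], IoU = 0, |C| = 3 and |A ∪ B| = 2, so GIoU = 0 - (3 - 2) / 3 = -1/3.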
width_height = (bottom_right - top_left).clamp(min=0) area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area def _max_by_axis(the_list): maxes = the_list[0] for sublist in the_list[1:]: for (index, item) in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes class NestedTensor: def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return (self.tensors, self.mask) def __repr__(self): return str(self.tensors) def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): if tensor_list[0].ndim == 3: max_size = _max_by_axis([list(img.shape) for img in tensor_list]) batch_shape = [len(tensor_list)] + max_size (batch_size, num_channels, height, width) = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) for (img, pad_img, m) in zip(tensor_list, tensor, mask): pad_img[:img.shape[0], :img.shape[1], :img.shape[2]].copy_(img) m[:img.shape[1], :img.shape[2]] = False else: raise ValueError('Only 3-dimensional tensors are supported') return NestedTensor(tensor, mask) # File: transformers-main/src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py import argparse import os import torch from transformers.utils import WEIGHTS_NAME DIALOGPT_MODELS = ['small', 'medium', 'large'] OLD_KEY = 'lm_head.decoder.weight' NEW_KEY = 'lm_head.weight' def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str): d = torch.load(checkpoint_path) d[NEW_KEY] = d.pop(OLD_KEY) os.makedirs(pytorch_dump_folder_path, exist_ok=True) torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) args = parser.parse_args() for MODEL in DIALOGPT_MODELS: checkpoint_path = os.path.join(args.dialogpt_path, f'{MODEL}_ft.pkl') pytorch_dump_folder_path = f'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint(checkpoint_path, pytorch_dump_folder_path) # File: transformers-main/src/transformers/models/dinat/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_dinat': ['DinatConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_dinat'] = ['DinatForImageClassification', 'DinatModel', 'DinatPreTrainedModel', 'DinatBackbone'] if TYPE_CHECKING: from .configuration_dinat import DinatConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dinat import DinatBackbone, DinatForImageClassification, DinatModel, DinatPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/dinat/configuration_dinat.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, 
get_aligned_output_features_output_indices logger = logging.get_logger(__name__) class DinatConfig(BackboneConfigMixin, PretrainedConfig): model_type = 'dinat' attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-05, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs): super().__init__(**kwargs) self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.kernel_size = kernel_size self.dilations = dilations self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) self.layer_scale_init_value = layer_scale_init_value self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)] (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names) # File: transformers-main/src/transformers/models/dinat/modeling_dinat.py """""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, OptionalDependencyNotAvailable, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_natten_available, logging, replace_return_docstrings, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_dinat import DinatConfig if is_natten_available(): from natten.functional import natten2dav, natten2dqkrpb else: def natten2dqkrpb(*args, **kwargs): raise OptionalDependencyNotAvailable() def natten2dav(*args, **kwargs): raise OptionalDependencyNotAvailable() logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'DinatConfig' _CHECKPOINT_FOR_DOC = 'shi-labs/dinat-mini-in1k-224' _EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512] _IMAGE_CLASS_CHECKPOINT = 'shi-labs/dinat-mini-in1k-224' _IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat' @dataclass class DinatEncoderOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class DinatModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: 
Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class DinatImageClassifierOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None class DinatEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.patch_embeddings = DinatPatchEmbeddings(config) self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]: embeddings = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) embeddings = self.dropout(embeddings) return embeddings class DinatPatchEmbeddings(nn.Module): def __init__(self, config): super().__init__() patch_size = config.patch_size (num_channels, hidden_size) = (config.num_channels, config.embed_dim) self.num_channels = num_channels if patch_size == 4: pass else: raise ValueError('Dinat only supports patch size of 4 at the moment.') self.projection = nn.Sequential(nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))) def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor: (_, num_channels, height, width) = pixel_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') embeddings = self.projection(pixel_values) embeddings = embeddings.permute(0, 2, 3, 1) return embeddings class DinatDownsampler(nn.Module): def __init__(self, dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None: super().__init__() self.dim = dim self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) self.norm = norm_layer(2 * dim) def forward(self, input_feature: torch.Tensor) -> torch.Tensor: input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) input_feature = self.norm(input_feature) return input_feature def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor: if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() output = input.div(keep_prob) * random_tensor return output class DinatDropPath(nn.Module): def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class NeighborhoodAttention(nn.Module): def __init__(self, config, dim, num_heads, kernel_size, dilation): super().__init__() if dim % num_heads != 0: raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})') self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.kernel_size = kernel_size self.dilation = dilation self.rpb = nn.Parameter(torch.zeros(num_heads, 2 * self.kernel_size - 1, 2 * self.kernel_size 
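# A self-contained sketch of the stochastic-depth behaviour implemented by `drop_path`
# above; the helper name `stochastic_depth` is used only so this snippet runs on its own.
# In eval mode the input passes through unchanged; in training mode whole samples are
# zeroed with probability `drop_prob` and survivors are rescaled by 1 / (1 - drop_prob)
# so the expected value is preserved.
import torch

def stochastic_depth(x: torch.Tensor, drop_prob: float, training: bool) -> torch.Tensor:
    # Same computation as `drop_path` above, repeated so this snippet is standalone.
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = (keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)).floor_()
    return x.div(keep_prob) * random_tensor

x = torch.ones(4, 7, 7, 64)  # (batch, height, width, channels)
assert torch.equal(stochastic_depth(x, 0.2, training=False), x)  # identity at inference
torch.manual_seed(0)
y = stochastic_depth(x, 0.2, training=True)
print(y.flatten(1)[:, 0])  # per-sample value: 0.0 if dropped, 1 / 0.8 = 1.25 if kept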
- 1)) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 3, 1, 2, 4) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = query_layer / math.sqrt(self.attention_head_size) attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, self.dilation) attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, self.dilation) context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class NeighborhoodAttentionOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class NeighborhoodAttentionModule(nn.Module): def __init__(self, config, dim, num_heads, kernel_size, dilation): super().__init__() self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size, dilation) self.output = NeighborhoodAttentionOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class DinatIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = 
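# A layout sketch for the neighbourhood attention above (toy sizes, `natten` not needed
# for this snippet): `transpose_for_scores` keeps the spatial grid, so queries, keys and
# values are laid out as (batch, heads, height, width, head_dim), and the relative
# position bias table covers a (2 * kernel_size - 1) x (2 * kernel_size - 1) window.
import torch

batch, height, width, dim, num_heads, kernel_size = 2, 14, 14, 64, 4, 7
head_dim = dim // num_heads

hidden_states = torch.randn(batch, height, width, dim)
scores_layout = hidden_states.view(batch, height, width, num_heads, head_dim).permute(0, 3, 1, 2, 4)
print(scores_layout.shape)  # torch.Size([2, 4, 14, 14, 16])

rpb = torch.zeros(num_heads, 2 * kernel_size - 1, 2 * kernel_size - 1)
print(rpb.shape)            # torch.Size([4, 13, 13]); matches self.rpb above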
self.intermediate_act_fn(hidden_states) return hidden_states class DinatOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class DinatLayer(nn.Module): def __init__(self, config, dim, num_heads, dilation, drop_path_rate=0.0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.kernel_size = config.kernel_size self.dilation = dilation self.window_size = self.kernel_size * self.dilation self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size, dilation=self.dilation) self.drop_path = DinatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = DinatIntermediate(config, dim) self.output = DinatOutput(config, dim) self.layer_scale_parameters = nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True) if config.layer_scale_init_value > 0 else None def maybe_pad(self, hidden_states, height, width): window_size = self.window_size pad_values = (0, 0, 0, 0, 0, 0) if height < window_size or width < window_size: pad_l = pad_t = 0 pad_r = max(0, window_size - width) pad_b = max(0, window_size - height) pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b) hidden_states = nn.functional.pad(hidden_states, pad_values) return (hidden_states, pad_values) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, torch.Tensor]: (batch_size, height, width, channels) = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) (hidden_states, pad_values) = self.maybe_pad(hidden_states, height, width) (_, height_pad, width_pad, _) = hidden_states.shape attention_outputs = self.attention(hidden_states, output_attentions=output_attentions) attention_output = attention_outputs[0] was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_output = attention_output[:, :height, :width, :].contiguous() if self.layer_scale_parameters is not None: attention_output = self.layer_scale_parameters[0] * attention_output hidden_states = shortcut + self.drop_path(attention_output) layer_output = self.layernorm_after(hidden_states) layer_output = self.output(self.intermediate(layer_output)) if self.layer_scale_parameters is not None: layer_output = self.layer_scale_parameters[1] * layer_output layer_output = hidden_states + self.drop_path(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs class DinatStage(nn.Module): def __init__(self, config, dim, depth, num_heads, dilations, drop_path_rate, downsample): super().__init__() self.config = config self.dim = dim self.layers = nn.ModuleList([DinatLayer(config=config, dim=dim, num_heads=num_heads, dilation=dilations[i], drop_path_rate=drop_path_rate[i]) for i in range(depth)]) if downsample is not None: self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: (_, height, 
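# A numeric sketch of the padding rule in `DinatLayer.maybe_pad` above: the spatial size
# must be at least kernel_size * dilation, otherwise the right/bottom edges are
# zero-padded up to that window size (toy numbers chosen for illustration).
import torch
from torch import nn

kernel_size, dilation = 7, 2
window_size = kernel_size * dilation        # 14
hidden_states = torch.randn(1, 10, 12, 64)  # (batch, height, width, channels)

pad_r = max(0, window_size - hidden_states.shape[2])  # 2
pad_b = max(0, window_size - hidden_states.shape[1])  # 4
padded = nn.functional.pad(hidden_states, (0, 0, 0, pad_r, 0, pad_b))
print(padded.shape)  # torch.Size([1, 14, 14, 64])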
width, _) = hidden_states.size() for (i, layer_module) in enumerate(self.layers): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: hidden_states = self.downsample(hidden_states_before_downsampling) stage_outputs = (hidden_states, hidden_states_before_downsampling) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs class DinatEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_levels = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] self.levels = nn.ModuleList([DinatStage(config=config, dim=int(config.embed_dim * 2 ** i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], dilations=config.dilations[i_layer], drop_path_rate=dpr[sum(config.depths[:i_layer]):sum(config.depths[:i_layer + 1])], downsample=DinatDownsampler if i_layer < self.num_levels - 1 else None) for i_layer in range(self.num_levels)]) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple, DinatEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for (i, layer_module) in enumerate(self.levels): layer_outputs = layer_module(hidden_states, output_attentions) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] if output_hidden_states and output_hidden_states_before_downsampling: reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and (not output_hidden_states_before_downsampling): reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[2:] if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return DinatEncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states) class DinatPreTrainedModel(PreTrainedModel): config_class = DinatConfig base_model_prefix = 'dinat' main_input_name = 'pixel_values' def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) DINAT_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. 
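# A sketch of how DinatEncoder above spreads the stochastic-depth rate over layers:
# one linspace from 0 to drop_path_rate across all layers, then sliced per stage
# (defaults from DinatConfig: depths=[3, 4, 6, 5], drop_path_rate=0.1).
import torch

depths, drop_path_rate = [3, 4, 6, 5], 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
per_stage = [dpr[sum(depths[:i]):sum(depths[:i + 1])] for i in range(len(depths))]
print(len(dpr))                     # 18 layers in total
print([len(s) for s in per_stage])  # [3, 4, 6, 5]
print(round(per_stage[-1][-1], 3))  # 0.1; the deepest layer gets the full rate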
Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`DinatConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' DINAT_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]\n for details.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare Dinat Model transformer outputting raw hidden-states without any specific head on top.', DINAT_START_DOCSTRING) class DinatModel(DinatPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) requires_backends(self, ['natten']) self.config = config self.num_levels = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1)) self.embeddings = DinatEmbeddings(config) self.encoder = DinatEncoder(config) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(DINAT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=DinatModelOutput, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, DinatModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return DinatModelOutput(last_hidden_state=sequence_output, pooler_output=pooled_output, 
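# A usage sketch for DinatModel with the checkpoint documented above
# ('shi-labs/dinat-mini-in1k-224'). Illustrative only: it needs the optional `natten`
# package and network access to the Hub; the image URL is the COCO sample used in the
# conversion scripts of this file set.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, DinatModel

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
processor = AutoImageProcessor.from_pretrained("shi-labs/dinat-mini-in1k-224")
model = DinatModel.from_pretrained("shi-labs/dinat-mini-in1k-224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # documented expected shape: [1, 7, 7, 512]
print(outputs.pooler_output.shape)      # torch.Size([1, 512])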
hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states) @add_start_docstrings('\n Dinat Model transformer with an image classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. for ImageNet.\n ', DINAT_START_DOCSTRING) class DinatForImageClassification(DinatPreTrainedModel): def __init__(self, config): super().__init__(config) requires_backends(self, ['natten']) self.num_labels = config.num_labels self.dinat = DinatModel(config) self.classifier = nn.Linear(self.dinat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @add_start_docstrings_to_model_forward(DINAT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=DinatImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, DinatImageClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.dinat(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return DinatImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states) @add_start_docstrings('NAT backbone, to be used with frameworks like DETR and MaskFormer.', DINAT_START_DOCSTRING) class DinatBackbone(DinatPreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) requires_backends(self, ['natten']) self.embeddings = DinatEmbeddings(config) self.encoder = DinatEncoder(config) self.num_features = [config.embed_dim] + [int(config.embed_dim * 2 ** i) for i in range(len(config.depths))] hidden_states_norms = {} for (stage, num_channels) in zip(self._out_features, self.channels): hidden_states_norms[stage] = nn.LayerNorm(num_channels) self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(DINAT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def 
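# A matching sketch for DinatForImageClassification with the image-classification
# checkpoint referenced above; the label lookup follows the usual id2label convention.
# Illustrative only (requires `natten` and network access).
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, DinatForImageClassification

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
processor = AutoImageProcessor.from_pretrained("shi-labs/dinat-mini-in1k-224")
model = DinatForImageClassification.from_pretrained("shi-labs/dinat-mini-in1k-224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # documented expected output: 'tabby, tabby cat'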
forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions embedding_output = self.embeddings(pixel_values) outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=True, output_hidden_states_before_downsampling=True, return_dict=True) hidden_states = outputs.reshaped_hidden_states feature_maps = () for (stage, hidden_state) in zip(self.stage_names, hidden_states): if stage in self.out_features: (batch_size, num_channels, height, width) = hidden_state.shape hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous() hidden_state = hidden_state.view(batch_size, height * width, num_channels) hidden_state = self.hidden_states_norms[stage](hidden_state) hidden_state = hidden_state.view(batch_size, height, width, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_maps += (hidden_state,) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/dinov2/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _import_structure = {'configuration_dinov2': ['Dinov2Config', 'Dinov2OnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_dinov2'] = ['Dinov2ForImageClassification', 'Dinov2Model', 'Dinov2PreTrainedModel', 'Dinov2Backbone'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_dinov2'] = ['FlaxDinov2ForImageClassification', 'FlaxDinov2Model', 'FlaxDinov2PreTrainedModel'] if TYPE_CHECKING: from .configuration_dinov2 import Dinov2Config, Dinov2OnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dinov2 import Dinov2Backbone, Dinov2ForImageClassification, Dinov2Model, Dinov2PreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_dinov2 import FlaxDinov2ForImageClassification, FlaxDinov2Model, FlaxDinov2PreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/dinov2/configuration_dinov2.py """""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) class Dinov2Config(BackboneConfigMixin, PretrainedConfig): model_type = 'dinov2' def 
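# A usage sketch for DinatBackbone as defined above: one feature map per requested
# stage, in (batch, channels, height, width) order; stage names come from
# config.stage_names. Weights are randomly initialised here, but the forward pass
# still requires the optional `natten` package.
import torch
from transformers import DinatBackbone, DinatConfig

config = DinatConfig(out_features=["stage2", "stage4"])
backbone = DinatBackbone(config)

pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)
for stage, feature_map in zip(backbone.out_features, outputs.feature_maps):
    print(stage, tuple(feature_map.shape))  # stage2 -> (1, 128, 28, 28), stage4 -> (1, 512, 7, 7)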
__init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, layerscale_value=1.0, drop_path_rate=0.0, use_swiglu_ffn=False, out_features=None, out_indices=None, apply_layernorm=True, reshape_hidden_states=True, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.mlp_ratio = mlp_ratio self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.layerscale_value = layerscale_value self.drop_path_rate = drop_path_rate self.use_swiglu_ffn = use_swiglu_ffn self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, num_hidden_layers + 1)] (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names) self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states class Dinov2OnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse('1.11') @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})]) @property def atol_for_validation(self) -> float: return 0.0001 # File: transformers-main/src/transformers/models/dinov2/convert_dinov2_to_hf.py """""" import argparse import json from pathlib import Path import requests import torch import torch.nn as nn from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, Dinov2Config, Dinov2ForImageClassification, Dinov2Model from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dinov2_config(model_name, image_classifier=False): config = Dinov2Config(image_size=518, patch_size=14) if 'vits' in model_name: config.hidden_size = 384 config.num_attention_heads = 6 elif 'vitb' in model_name: pass elif 'vitl' in model_name: config.hidden_size = 1024 config.num_hidden_layers = 24 config.num_attention_heads = 16 elif 'vitg' in model_name: config.use_swiglu_ffn = True config.hidden_size = 1536 config.num_hidden_layers = 40 config.num_attention_heads = 24 else: raise ValueError('Model not supported') if image_classifier: repo_id = 'huggingface/label-files' filename = 'imagenet-1k-id2label.json' config.num_labels = 1000 config.id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) config.id2label = {int(k): v for (k, v) in config.id2label.items()} return config def create_rename_keys(config): rename_keys = [] rename_keys.append(('cls_token', 'embeddings.cls_token')) rename_keys.append(('mask_token', 'embeddings.mask_token')) rename_keys.append(('pos_embed', 'embeddings.position_embeddings')) rename_keys.append(('patch_embed.proj.weight', 'embeddings.patch_embeddings.projection.weight')) rename_keys.append(('patch_embed.proj.bias', 
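# A small sketch of the Dinov2Config built by `get_dinov2_config` above for the default
# "vitb" branch (image_size=518, patch_size=14, everything else left at the defaults).
from transformers import Dinov2Config

config = Dinov2Config(image_size=518, patch_size=14)
print(config.hidden_size, config.num_hidden_layers)   # 768 12
print(config.stage_names[:3])                         # ['stem', 'stage1', 'stage2']
print((config.image_size // config.patch_size) ** 2)  # 1369 patches per 518x518 image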
'embeddings.patch_embeddings.projection.bias')) for i in range(config.num_hidden_layers): rename_keys.append((f'blocks.{i}.norm1.weight', f'encoder.layer.{i}.norm1.weight')) rename_keys.append((f'blocks.{i}.norm1.bias', f'encoder.layer.{i}.norm1.bias')) rename_keys.append((f'blocks.{i}.norm2.weight', f'encoder.layer.{i}.norm2.weight')) rename_keys.append((f'blocks.{i}.norm2.bias', f'encoder.layer.{i}.norm2.bias')) if config.use_swiglu_ffn: rename_keys.append((f'blocks.{i}.mlp.w12.weight', f'encoder.layer.{i}.mlp.w12.weight')) rename_keys.append((f'blocks.{i}.mlp.w12.bias', f'encoder.layer.{i}.mlp.w12.bias')) rename_keys.append((f'blocks.{i}.mlp.w3.weight', f'encoder.layer.{i}.mlp.w3.weight')) rename_keys.append((f'blocks.{i}.mlp.w3.bias', f'encoder.layer.{i}.mlp.w3.bias')) else: rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'encoder.layer.{i}.mlp.fc1.weight')) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'encoder.layer.{i}.mlp.fc1.bias')) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'encoder.layer.{i}.mlp.fc2.weight')) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'encoder.layer.{i}.mlp.fc2.bias')) rename_keys.append((f'blocks.{i}.ls1.gamma', f'encoder.layer.{i}.layer_scale1.lambda1')) rename_keys.append((f'blocks.{i}.ls2.gamma', f'encoder.layer.{i}.layer_scale2.lambda1')) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'encoder.layer.{i}.attention.output.dense.weight')) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'encoder.layer.{i}.attention.output.dense.bias')) rename_keys.append(('norm.weight', 'layernorm.weight')) rename_keys.append(('norm.bias', 'layernorm.bias')) return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def read_in_q_k_v(state_dict, config): for i in range(config.num_hidden_layers): in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight') in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias') state_dict[f'encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[:config.hidden_size, :] state_dict[f'encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[:config.hidden_size] state_dict[f'encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size:config.hidden_size * 2, :] state_dict[f'encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[config.hidden_size:config.hidden_size * 2] state_dict[f'encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size:, :] state_dict[f'encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size:] def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw).convert('RGB') return image @torch.no_grad() def convert_dinov2_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): image_classifier = '1layer' in model_name config = get_dinov2_config(model_name, image_classifier=image_classifier) original_model = torch.hub.load('facebookresearch/dinov2', model_name.replace('_1layer', '')) original_model.eval() state_dict = original_model.state_dict() rename_keys = create_rename_keys(config) for (src, dest) in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config) for (key, val) in state_dict.copy().items(): val = state_dict.pop(key) if 'w12' in key: key = key.replace('w12', 'weights_in') if 'w3' in key: key = key.replace('w3', 'weights_out') state_dict[key] = val if image_classifier: model = Dinov2ForImageClassification(config).eval() 
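# A numeric sketch of the split performed by `read_in_q_k_v` above: the original
# checkpoints store one fused (3 * hidden_size, hidden_size) qkv projection that is
# sliced into separate query/key/value weights (tiny hidden size for illustration).
import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)

query_weight = in_proj_weight[:hidden_size, :]
key_weight = in_proj_weight[hidden_size:hidden_size * 2, :]
value_weight = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query_weight, key_weight, value_weight], dim=0), in_proj_weight)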
model.dinov2.load_state_dict(state_dict) model_name_to_classifier_dict_url = {'dinov2_vits14_1layer': 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_linear_head.pth', 'dinov2_vitb14_1layer': 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_linear_head.pth', 'dinov2_vitl14_1layer': 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_linear_head.pth', 'dinov2_vitg14_1layer': 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_linear_head.pth'} url = model_name_to_classifier_dict_url[model_name] classifier_state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu') model.classifier.weight = nn.Parameter(classifier_state_dict['weight']) model.classifier.bias = nn.Parameter(classifier_state_dict['bias']) else: model = Dinov2Model(config).eval() model.load_state_dict(state_dict) image = prepare_img() transformations = transforms.Compose([transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD)]) original_pixel_values = transformations(image).unsqueeze(0) processor = BitImageProcessor(size={'shortest_edge': 256}, resample=PILImageResampling.BICUBIC, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD) pixel_values = processor(image, return_tensors='pt').pixel_values assert torch.allclose(original_pixel_values, pixel_values) with torch.no_grad(): outputs = model(pixel_values, output_hidden_states=True) original_outputs = original_model(pixel_values) if image_classifier: print('Predicted class:') class_idx = outputs.logits.argmax(-1).item() print(model.config.id2label[class_idx]) else: assert outputs.last_hidden_state[:, 0].shape == original_outputs.shape assert torch.allclose(outputs.last_hidden_state[:, 0], original_outputs, atol=0.001) print('Looks ok!') if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model {model_name} to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) print(f'Saving image processor to {pytorch_dump_folder_path}') processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model_name_to_hf_name = {'dinov2_vits14': 'dinov2-small', 'dinov2_vitb14': 'dinov2-base', 'dinov2_vitl14': 'dinov2-large', 'dinov2_vitg14': 'dinov2-giant', 'dinov2_vits14_1layer': 'dinov2-small-imagenet1k-1-layer', 'dinov2_vitb14_1layer': 'dinov2-base-imagenet1k-1-layer', 'dinov2_vitl14_1layer': 'dinov2-large-imagenet1k-1-layer', 'dinov2_vitg14_1layer': 'dinov2-giant-imagenet1k-1-layer'} name = model_name_to_hf_name[model_name] model.push_to_hub(f'facebook/{name}') processor.push_to_hub(f'facebook/{name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='dinov2_vitb14', type=str, choices=['dinov2_vits14', 'dinov2_vitb14', 'dinov2_vitl14', 'dinov2_vitg14', 'dinov2_vits14_1layer', 'dinov2_vitb14_1layer', 'dinov2_vitl14_1layer', 'dinov2_vitg14_1layer'], help="Name of the model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.') args = parser.parse_args() convert_dinov2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: 
transformers-main/src/transformers/models/dinov2/modeling_dinov2.py """""" import collections.abc import math from typing import Dict, List, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int from ...utils.backbone_utils import BackboneMixin from .configuration_dinov2 import Dinov2Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'Dinov2Config' _CHECKPOINT_FOR_DOC = 'facebook/dinov2-base' _EXPECTED_OUTPUT_SHAPE = [1, 257, 768] _IMAGE_CLASS_CHECKPOINT = 'facebook/dinov2-small-imagenet1k-1-layer' _IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat' class Dinov2Embeddings(nn.Module): def __init__(self, config: Dinov2Config) -> None: super().__init__() self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size)) self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size)) self.patch_embeddings = Dinov2PatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.patch_size = config.patch_size self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) target_dtype = patch_pos_embed.dtype patch_pos_embed = nn.functional.interpolate(patch_pos_embed.to(torch.float32), size=(new_height, new_width), mode='bicubic', align_corners=False).to(dtype=target_dtype) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor]=None) -> torch.Tensor: (batch_size, _, height, width) = pixel_values.shape target_dtype = self.patch_embeddings.projection.weight.dtype embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype)) if bool_masked_pos is not None: embeddings = torch.where(bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings) cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) embeddings = self.dropout(embeddings) return embeddings class Dinov2PatchEmbeddings(nn.Module): def __init__(self, config): super().__init__() (image_size, patch_size) = (config.image_size, config.patch_size) 
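# A standalone sketch of the position-embedding interpolation in
# `Dinov2Embeddings.interpolate_pos_encoding` above: split off the class token, resize
# the S x S patch grid with bicubic interpolation, and concatenate back (toy sizes
# chosen for illustration).
import torch
from torch import nn

dim, grid, new_grid = 32, 16, 24  # 16x16 pretraining grid resized to 24x24
pos_embed = torch.randn(1, grid * grid + 1, dim)

class_pos = pos_embed[:, :1]
patch_pos = pos_embed[:, 1:].reshape(1, grid, grid, dim).permute(0, 3, 1, 2)
patch_pos = nn.functional.interpolate(patch_pos, size=(new_grid, new_grid), mode="bicubic", align_corners=False)
patch_pos = patch_pos.permute(0, 2, 3, 1).view(1, -1, dim)
resized = torch.cat((class_pos, patch_pos), dim=1)
print(resized.shape)  # torch.Size([1, 577, 32]); 24 * 24 + 1 positions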
(num_channels, hidden_size) = (config.num_channels, config.hidden_size) image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: num_channels = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError(f'Make sure that the channel dimension of the pixel values match with the one set in the configuration. Expected {self.num_channels} but got {num_channels}.') embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings class Dinov2SelfAttention(nn.Module): def __init__(self, config: Dinov2Config) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size {(config.hidden_size,)} is not a multiple of the number of attention heads {config.num_attention_heads}.') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class Dinov2SelfOutput(nn.Module): def __init__(self, config: Dinov2Config) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = 
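# A shape sketch for Dinov2PatchEmbeddings above: a strided Conv2d maps
# (batch, 3, H, W) pixels to (batch, num_patches, hidden_size) tokens
# (default-style sizes used below for illustration).
import torch
from torch import nn

image_size, patch_size, hidden_size = 224, 16, 768
projection = nn.Conv2d(3, hidden_size, kernel_size=patch_size, stride=patch_size)

pixel_values = torch.randn(1, 3, image_size, image_size)
embeddings = projection(pixel_values).flatten(2).transpose(1, 2)
print(embeddings.shape)  # torch.Size([1, 196, 768]); 196 = (224 // 16) ** 2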
self.dropout(hidden_states) return hidden_states class Dinov2Attention(nn.Module): def __init__(self, config: Dinov2Config) -> None: super().__init__() self.attention = Dinov2SelfAttention(config) self.output = Dinov2SelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class Dinov2LayerScale(nn.Module): def __init__(self, config) -> None: super().__init__() self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size)) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: return hidden_state * self.lambda1 def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor: if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() output = input.div(keep_prob) * random_tensor return output class Dinov2DropPath(nn.Module): def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class Dinov2MLP(nn.Module): def __init__(self, config) -> None: super().__init__() in_features = out_features = config.hidden_size hidden_features = int(config.hidden_size * config.mlp_ratio) self.fc1 = nn.Linear(in_features, hidden_features, bias=True) if isinstance(config.hidden_act, str): self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act self.fc2 = nn.Linear(hidden_features, out_features, bias=True) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: hidden_state = self.fc1(hidden_state) hidden_state = self.activation(hidden_state) hidden_state = self.fc2(hidden_state) return hidden_state class Dinov2SwiGLUFFN(nn.Module): def __init__(self, config) -> None: super().__init__() in_features = out_features = config.hidden_size hidden_features = int(config.hidden_size * config.mlp_ratio) hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True) self.weights_out = nn.Linear(hidden_features, out_features, bias=True) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: hidden_state = self.weights_in(hidden_state) (x1, 
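# A sketch of the SwiGLU feed-forward used when `use_swiglu_ffn=True` (the "giant"
# branch of the conversion script above): the intermediate width is 2/3 of the usual
# MLP width rounded up to a multiple of 8, and the first projection is chunked into a
# gate half and a value half.
import torch
from torch import nn

hidden_size, mlp_ratio = 1536, 4
hidden_features = int(hidden_size * mlp_ratio)
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
print(hidden_features)  # 4096

weights_in = nn.Linear(hidden_size, 2 * hidden_features)
weights_out = nn.Linear(hidden_features, hidden_size)

x = torch.randn(1, 257, hidden_size)
x1, x2 = weights_in(x).chunk(2, dim=-1)
print(weights_out(nn.functional.silu(x1) * x2).shape)  # torch.Size([1, 257, 1536])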
x2) = hidden_state.chunk(2, dim=-1) hidden = nn.functional.silu(x1) * x2 return self.weights_out(hidden) class Dinov2Layer(nn.Module): def __init__(self, config: Dinov2Config) -> None: super().__init__() self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.attention = Dinov2Attention(config) self.layer_scale1 = Dinov2LayerScale(config) self.drop_path = Dinov2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_swiglu_ffn: self.mlp = Dinov2SwiGLUFFN(config) else: self.mlp = Dinov2MLP(config) self.layer_scale2 = Dinov2LayerScale(config) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention(self.norm1(hidden_states), head_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] attention_output = self.layer_scale1(attention_output) outputs = self_attention_outputs[1:] hidden_states = self.drop_path(attention_output) + hidden_states layer_output = self.norm2(hidden_states) layer_output = self.mlp(layer_output) layer_output = self.layer_scale2(layer_output) layer_output = self.drop_path(layer_output) + hidden_states outputs = (layer_output,) + outputs return outputs class Dinov2Encoder(nn.Module): def __init__(self, config: Dinov2Config) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([Dinov2Layer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, layer_head_mask, output_attentions) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class Dinov2PreTrainedModel(PreTrainedModel): config_class = Dinov2Config base_model_prefix = 'dinov2' main_input_name = 'pixel_values' supports_gradient_checkpointing = True _no_split_modules = ['Dinov2SwiGLUFFN'] def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif 
isinstance(module, Dinov2Embeddings): module.position_embeddings.data = nn.init.trunc_normal_(module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.position_embeddings.dtype) module.cls_token.data = nn.init.trunc_normal_(module.cls_token.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.cls_token.dtype) DINOV2_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`Dinov2Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' DINOV2_BASE_INPUTS_DOCSTRING = "\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`BitImageProcessor.preprocess`] for details.\n\n bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):\n Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for\n pre-training.\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" DINOV2_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`BitImageProcessor.preprocess`] for details.\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top.', DINOV2_START_DOCSTRING) class Dinov2Model(Dinov2PreTrainedModel): def __init__(self, config: Dinov2Config): super().__init__(config) self.config = config self.embeddings = Dinov2Embeddings(config) self.encoder = Dinov2Encoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_init() def get_input_embeddings(self) -> Dinov2PatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(DINOV2_BASE_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) encoder_outputs = self.encoder(embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = sequence_output[:, 0, :] if not return_dict: head_outputs = (sequence_output, pooled_output) return head_outputs + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('\n Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. 
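# A usage sketch for Dinov2Model with the checkpoint documented above
# ('facebook/dinov2-base'). Illustrative only: it needs network access to the Hub.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, Dinov2Model

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
model = Dinov2Model.from_pretrained("facebook/dinov2-base")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # documented expected shape: [1, 257, 768]
print(outputs.pooler_output.shape)      # torch.Size([1, 768]); the layernormed [CLS] token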
for ImageNet.\n ', DINOV2_START_DOCSTRING) class Dinov2ForImageClassification(Dinov2PreTrainedModel): def __init__(self, config: Dinov2Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.dinov2 = Dinov2Model(config) self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.dinov2(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] cls_token = sequence_output[:, 0] patch_tokens = sequence_output[:, 1:] linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1) logits = self.classifier(linear_input) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Dinov2 backbone, to be used with frameworks like DETR and MaskFormer.\n ', DINOV2_START_DOCSTRING) class Dinov2Backbone(Dinov2PreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] self.embeddings = Dinov2Embeddings(config) self.encoder = Dinov2Encoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_init() def get_input_embeddings(self) -> Dinov2PatchEmbeddings: return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if 
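# A small sketch of how the classification head above builds its input: the [CLS] token
# is concatenated with the mean of the patch tokens, so the linear classifier sees
# 2 * hidden_size features (toy tensors below).
import torch
from torch import nn

hidden_size, num_labels = 768, 1000
sequence_output = torch.randn(1, 257, hidden_size)  # [CLS] + 256 patch tokens

cls_token = sequence_output[:, 0]
patch_tokens = sequence_output[:, 1:]
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
print(linear_input.shape)  # torch.Size([1, 1536])

classifier = nn.Linear(hidden_size * 2, num_labels)
print(classifier(linear_input).shape)  # torch.Size([1, 1000])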
output_hidden_states is not None else self.config.output_hidden_states output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions embedding_output = self.embeddings(pixel_values) outputs = self.encoder(embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict) hidden_states = outputs.hidden_states if return_dict else outputs[1] feature_maps = () for (stage, hidden_state) in zip(self.stage_names, hidden_states): if stage in self.out_features: if self.config.apply_layernorm: hidden_state = self.layernorm(hidden_state) if self.config.reshape_hidden_states: hidden_state = hidden_state[:, 1:] (batch_size, _, height, width) = pixel_values.shape patch_size = self.config.patch_size hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_maps += (hidden_state,) if not return_dict: if output_hidden_states: output = (feature_maps,) + outputs[1:] else: output = (feature_maps,) + outputs[2:] return output return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions if output_attentions else None) # File: transformers-main/src/transformers/models/dinov2/modeling_flax_dinov2.py """""" import collections.abc import math from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPooling, FlaxSequenceClassifierOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward from .configuration_dinov2 import Dinov2Config DINOV2_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading, saving and converting weights from PyTorch models)\n\n This model is also a\n [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as\n a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and\n behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`Dinov2Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. 
Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' DINOV2_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`Dinov2ImageProcessor.__call__`]\n for details.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class FlaxDinov2PatchEmbeddings(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): image_size = self.config.image_size patch_size = self.config.patch_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) self.num_patches = num_patches self.num_channels = self.config.num_channels self.projection = nn.Conv(self.config.hidden_size, kernel_size=patch_size, strides=patch_size, padding='VALID', dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, 'fan_in', 'truncated_normal')) def __call__(self, pixel_values): num_channels = pixel_values.shape[-1] if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') embeddings = self.projection(pixel_values) (batch_size, _, _, channels) = embeddings.shape return jnp.reshape(embeddings, (batch_size, -1, channels)) class FlaxDinov2Embeddings(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.cls_token = self.param('cls_token', jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, 'fan_in', 'truncated_normal'), (1, 1, self.config.hidden_size)) self.mask_token = self.param('mask_token', jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, 'fan_in', 'truncated_normal'), (1, self.config.hidden_size)) self.patch_embeddings = FlaxDinov2PatchEmbeddings(self.config, dtype=self.dtype) num_patches = self.patch_embeddings.num_patches self.position_embeddings = self.param('position_embeddings', jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, 'fan_in', 'truncated_normal'), (1, num_patches + 1, self.config.hidden_size)) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def interpolate_pos_encoding(self, config, hidden_states, height, width, position_embeddings): num_patches = hidden_states.shape[1] - 1 num_positions = position_embeddings.shape[1] - 1 if num_patches == num_positions and height == width: return 
position_embeddings class_pos_embed = position_embeddings[:, 0] patch_pos_embed = position_embeddings[:, 1:] dim = hidden_states.shape[-1] h = height // config.patch_size w = width // config.patch_size (height, width) = (h + 0.1, w + 0.1) patch_pos_embed = patch_pos_embed.reshape((1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)) patch_pos_embed = jnp.transpose(patch_pos_embed, (0, 3, 1, 2)) target_dtype = patch_pos_embed.dtype new_height_ratio = jnp.float32(height / math.sqrt(num_positions)) new_width_ratio = jnp.float32(width / math.sqrt(num_positions)) scale = jnp.array([new_height_ratio, new_width_ratio], dtype=jnp.float32) translation = jnp.array([0.0, 0.0], dtype=jnp.float32) patch_pos_embed = jax.image.scale_and_translate(patch_pos_embed.astype(jnp.float32), shape=(patch_pos_embed.shape[0], patch_pos_embed.shape[1], h, w), spatial_dims=(2, 3), scale=scale, translation=translation, method='bicubic', antialias=False) patch_pos_embed = patch_pos_embed.astype(target_dtype) patch_pos_embed = jnp.transpose(patch_pos_embed, (0, 2, 3, 1)).reshape((hidden_states.shape[0], -1, dim)) return jnp.concatenate((class_pos_embed[jnp.newaxis, :], patch_pos_embed), axis=1) def __call__(self, pixel_values, deterministic=True): batch_size = pixel_values.shape[0] target_dtype = self.patch_embeddings.projection.dtype (height, width) = (pixel_values.shape[1], pixel_values.shape[2]) embeddings = self.patch_embeddings(pixel_values.astype(target_dtype)) cls_tokens = jnp.broadcast_to(self.cls_token, (batch_size, 1, self.config.hidden_size)) embeddings = jnp.concatenate((cls_tokens, embeddings), axis=1) embeddings = embeddings + self.interpolate_pos_encoding(self.config, embeddings, height, width, self.position_embeddings) embeddings = self.dropout(embeddings, deterministic=deterministic) return embeddings class FlaxDinov2SelfAttention(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError('`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads`: {self.config.num_attention_heads}') self.query = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, mode='fan_in', distribution='truncated_normal'), use_bias=self.config.qkv_bias) self.key = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, mode='fan_in', distribution='truncated_normal'), use_bias=self.config.qkv_bias) self.value = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, mode='fan_in', distribution='truncated_normal'), use_bias=self.config.qkv_bias) def __call__(self, hidden_states, deterministic: bool=True, output_attentions: bool=False): head_dim = self.config.hidden_size // self.config.num_attention_heads query_states = self.query(hidden_states).reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)) value_states = self.value(hidden_states).reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)) key_states = self.key(hidden_states).reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)) dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng('dropout') attn_weights = 
dot_product_attention_weights(query_states, key_states, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxDinov2SelfOutput(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, 'fan_in', 'truncated_normal'), dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool=True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxDinov2Attention(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxDinov2SelfAttention(self.config, dtype=self.dtype) self.output = FlaxDinov2SelfOutput(self.config, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True, output_attentions: bool=False): attn_outputs = self.attention(hidden_states, deterministic=deterministic, output_attentions=output_attentions) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs def ones_with_scale(key, shape, scale, dtype=jnp.float32): return jnp.ones(shape, dtype) * scale class FlaxDinov2LayerScale(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.lambda1 = self.config.layerscale_value * self.param('lambda1', jax.nn.initializers.ones, (self.config.hidden_size,)) self.lambda1 = self.lambda1 * self.config.layerscale_value def __call__(self, hidden_states): return self.lambda1 * hidden_states class FlaxDinov2DropPath(nn.Module): rate: float @nn.module.compact def __call__(self, inputs, deterministic: Optional[bool]=True): if self.rate == 0.0: return inputs keep_prob = 1.0 - self.rate if deterministic: return inputs else: shape = (inputs.shape[0],) + (1,) * (inputs.ndim - 1) rng = self.make_rng('droppath') random_tensor = keep_prob + jax.random.uniform(rng, shape=shape, dtype=inputs.dtype) binary_tensor = jnp.floor(random_tensor) output = inputs / keep_prob * binary_tensor return output class FlaxDinov2MLP(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.fc1 = nn.Dense(self.config.hidden_size * self.config.mlp_ratio, kernel_init=jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, 'fan_in', 'truncated_normal'), dtype=self.dtype) self.fc2 = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, 'fan_in', 'truncated_normal'), dtype=self.dtype) if isinstance(self.config.hidden_act, str): self.act = ACT2FN[self.config.hidden_act] else: self.act = self.config.hidden_act def __call__(self, hidden_states): hidden_states = self.fc1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class FlaxDinov2SwiGLUFFN(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): hidden_features = 
int(self.config.hidden_size * self.config.mlp_ratio) hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 self.weights_in = nn.Dense(2 * hidden_features, kernel_init=jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, 'fan_in', 'truncated_normal'), dtype=self.dtype) self.weights_out = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, 'fan_in', 'truncated_normal'), dtype=self.dtype) def __call__(self, hidden_states): hidden_states = self.weights_in(hidden_states) (x1, x2) = jnp.split(hidden_states, 2, axis=-1) hidden = nn.silu(x1) * x2 return self.weights_out(hidden) class FlaxDinov2Layer(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.norm1 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.attention = FlaxDinov2Attention(self.config, dtype=self.dtype) self.layer_scale1 = FlaxDinov2LayerScale(self.config, dtype=self.dtype) self.drop_path = FlaxDinov2DropPath(self.config.drop_path_rate) self.norm2 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) if self.config.use_swiglu_ffn: self.mlp = FlaxDinov2SwiGLUFFN(self.config, dtype=self.dtype) else: self.mlp = FlaxDinov2MLP(self.config, dtype=self.dtype) self.layer_scale2 = FlaxDinov2LayerScale(self.config, dtype=self.dtype) def __call__(self, hidden_states, deterministic: bool=True, output_attentions: bool=False): self_attention_outputs = self.attention(self.norm1(hidden_states), deterministic=deterministic, output_attentions=output_attentions) attention_output = self_attention_outputs[0] attention_output = self.layer_scale1(attention_output) outputs = self_attention_outputs[1:] hidden_states = self.drop_path(attention_output) + hidden_states layer_output = self.norm2(hidden_states) layer_output = self.mlp(layer_output) layer_output = self.layer_scale2(layer_output) layer_output = self.drop_path(layer_output) + hidden_states outputs = (layer_output,) + outputs return outputs class FlaxDinov2LayerCollection(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxDinov2Layer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] def __call__(self, hidden_states, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer(hidden_states, deterministic=deterministic, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states,) if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) class FlaxDinov2Encoder(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layer = FlaxDinov2LayerCollection(self.config, dtype=self.dtype) def __call__(self, hidden_states, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return self.layer(hidden_states, deterministic=deterministic, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict) class FlaxDinov2PreTrainedModel(FlaxPreTrainedModel): config_class = Dinov2Config base_model_prefix = 'dinov2' main_input_name = 'pixel_values' module_class: nn.Module = None def __init__(self, config: Dinov2Config, input_shape=None, seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) if input_shape is None: input_shape = (1, config.image_size, config.image_size, config.num_channels) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: pixel_values = jnp.zeros(input_shape, dtype=self.dtype) (params_rng, dropout_rng) = jax.random.split(rng) (dropout_rng, droppath_rng) = jax.random.split(dropout_rng) rngs = {'params': params_rng, 'dropout': dropout_rng, 'droppath': droppath_rng} random_params = self.module.init(rngs, pixel_values, return_dict=False)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def __call__(self, pixel_values, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) rngs = {} if dropout_rng is not None: (dropout_rng, droppath_rng) = jax.random.split(dropout_rng) rngs['dropout'] = dropout_rng rngs['droppath'] = droppath_rng return self.module.apply({'params': params or self.params}, jnp.array(pixel_values, dtype=jnp.float32), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs) class FlaxDinov2Module(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.embeddings = FlaxDinov2Embeddings(self.config, dtype=self.dtype) self.encoder = FlaxDinov2Encoder(self.config, dtype=self.dtype) self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, pixel_values, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): hidden_states = self.embeddings(pixel_values, deterministic=deterministic) encoder_outputs = self.encoder(hidden_states, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = sequence_output[:, 0, :] if not return_dict: head_outputs = (sequence_output, pooled_output) return head_outputs + encoder_outputs[1:] return FlaxBaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, 
attentions=encoder_outputs.attentions) @add_start_docstrings('The bare Dinov2 Model transformer outputting raw hidden-states without any specific head on top.', DINOV2_START_DOCSTRING) class FlaxDinov2Model(FlaxDinov2PreTrainedModel): module_class = FlaxDinov2Module FLAX_VISION_MODEL_DOCSTRING = '\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import AutoImageProcessor, FlaxDinov2Model\n >>> from PIL import Image\n >>> import requests\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> image_processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")\n >>> model = FlaxDinov2Model.from_pretrained("facebook/dinov2-base")\n\n >>> inputs = image_processor(images=image, return_tensors="np")\n >>> outputs = model(**inputs)\n >>> last_hidden_states = outputs.last_hidden_state\n ```\n' overwrite_call_docstring(FlaxDinov2Model, FLAX_VISION_MODEL_DOCSTRING) append_replace_return_docstrings(FlaxDinov2Model, output_type=FlaxBaseModelOutputWithPooling, config_class=Dinov2Config) class FlaxDinov2ForImageClassificationModule(nn.Module): config: Dinov2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.dinov2 = FlaxDinov2Module(config=self.config, dtype=self.dtype) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.variance_scaling(self.config.initializer_range ** 2, 'fan_in', 'truncated_normal')) def __call__(self, pixel_values=None, deterministic: bool=True, output_attentions=None, output_hidden_states=None, return_dict=None): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.dinov2(pixel_values, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] cls_token = hidden_states[:, 0] patch_tokens = hidden_states[:, 1:] linear_input = jnp.concatenate([cls_token, patch_tokens.mean(axis=1)], axis=-1) logits = self.classifier(linear_input) if not return_dict: output = (logits,) + outputs[2:] return output return FlaxSequenceClassifierOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n the [CLS] token) e.g. 
for ImageNet.\n ', DINOV2_START_DOCSTRING) class FlaxDinov2ForImageClassification(FlaxDinov2PreTrainedModel): module_class = FlaxDinov2ForImageClassificationModule FLAX_VISION_CLASSIFICATION_DOCSTRING = '\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoImageProcessor, FlaxDinov2ForImageClassification\n >>> from PIL import Image\n >>> import jax\n >>> import requests\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> image_processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base-imagenet1k-1-layer")\n >>> model = FlaxDinov2ForImageClassification.from_pretrained("facebook/dinov2-base-imagenet1k-1-layer")\n\n >>> inputs = image_processor(images=image, return_tensors="np")\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n\n >>> # model predicts one of the 1000 ImageNet classes\n >>> predicted_class_idx = jax.numpy.argmax(logits, axis=-1)\n >>> print("Predicted class:", model.config.id2label[predicted_class_idx.item()])\n ```\n' overwrite_call_docstring(FlaxDinov2ForImageClassification, FLAX_VISION_CLASSIFICATION_DOCSTRING) append_replace_return_docstrings(FlaxDinov2ForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=Dinov2Config) # File: transformers-main/src/transformers/models/distilbert/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_distilbert': ['DistilBertConfig', 'DistilBertOnnxConfig'], 'tokenization_distilbert': ['DistilBertTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_distilbert'] = ['DistilBertForMaskedLM', 'DistilBertForMultipleChoice', 'DistilBertForQuestionAnswering', 'DistilBertForSequenceClassification', 'DistilBertForTokenClassification', 'DistilBertModel', 'DistilBertPreTrainedModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_distilbert'] = ['TFDistilBertForMaskedLM', 'TFDistilBertForMultipleChoice', 'TFDistilBertForQuestionAnswering', 'TFDistilBertForSequenceClassification', 'TFDistilBertForTokenClassification', 'TFDistilBertMainLayer', 'TFDistilBertModel', 'TFDistilBertPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_distilbert'] = ['FlaxDistilBertForMaskedLM', 'FlaxDistilBertForMultipleChoice', 'FlaxDistilBertForQuestionAnswering', 'FlaxDistilBertForSequenceClassification', 'FlaxDistilBertForTokenClassification', 'FlaxDistilBertModel', 'FlaxDistilBertPreTrainedModel'] if TYPE_CHECKING: from .configuration_distilbert import DistilBertConfig, DistilBertOnnxConfig from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/distilbert/configuration_distilbert.py """""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class DistilBertConfig(PretrainedConfig): model_type = 'distilbert' attribute_map = {'hidden_size': 'dim', 'num_attention_heads': 'n_heads', 'num_hidden_layers': 'n_layers'} def __init__(self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation='gelu', initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.sinusoidal_pos_embds = sinusoidal_pos_embds self.n_layers = n_layers self.n_heads = n_heads self.dim = dim self.hidden_dim = hidden_dim self.dropout = dropout self.attention_dropout = attention_dropout self.activation = activation self.initializer_range = initializer_range self.qa_dropout = qa_dropout self.seq_classif_dropout = seq_classif_dropout super().__init__(**kwargs, pad_token_id=pad_token_id) class DistilBertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)]) # File: transformers-main/src/transformers/models/distilbert/modeling_distilbert.py """""" import math from typing import Dict, List, Optional, Set, Tuple, Union import numpy as np import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import get_activation from ...configuration_utils import PretrainedConfig from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils 
import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings from .configuration_distilbert import DistilBertConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'distilbert-base-uncased' _CONFIG_FOR_DOC = 'DistilBertConfig' def create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor): if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(out, modifier_rank=0): if torch.distributed.get_rank() == 0: _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out) else: _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out) def _create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out.requires_grad = False out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() class Embeddings(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim) self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12) self.dropout = nn.Dropout(config.dropout) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor]=None) -> torch.Tensor: if input_ids is not None: input_embeds = self.word_embeddings(input_ids) seq_length = input_embeds.size(1) if hasattr(self, 'position_ids'): position_ids = self.position_ids[:, :seq_length] else: position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) position_embeddings = self.position_embeddings(position_ids) embeddings = input_embeds + position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class MultiHeadSelfAttention(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() self.config = config self.n_heads = config.n_heads self.dim = config.dim self.dropout = nn.Dropout(p=config.attention_dropout) self.is_causal = False if self.dim % self.n_heads != 0: raise ValueError(f'self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly') self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.pruned_heads: Set[int] = set() self.attention_head_size = self.dim // self.n_heads def prune_heads(self, heads: List[int]): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.n_heads, self.attention_head_size, self.pruned_heads) self.q_lin = prune_linear_layer(self.q_lin, index) self.k_lin = prune_linear_layer(self.k_lin, index) self.v_lin = prune_linear_layer(self.v_lin, index) self.out_lin = prune_linear_layer(self.out_lin, index, dim=1) self.n_heads = self.n_heads - len(heads) self.dim = 
self.attention_head_size * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, ...]: (bs, q_length, dim) = query.size() k_length = key.size(1) dim_per_head = self.dim // self.n_heads mask_reshp = (bs, 1, 1, k_length) def shape(x: torch.Tensor) -> torch.Tensor: return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2) def unshape(x: torch.Tensor) -> torch.Tensor: return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head) q = shape(self.q_lin(query)) k = shape(self.k_lin(key)) v = shape(self.v_lin(value)) q = q / math.sqrt(dim_per_head) scores = torch.matmul(q, k.transpose(2, 3)) mask = (mask == 0).view(mask_reshp).expand_as(scores) scores = scores.masked_fill(mask, torch.tensor(torch.finfo(scores.dtype).min)) weights = nn.functional.softmax(scores, dim=-1) weights = self.dropout(weights) if head_mask is not None: weights = weights * head_mask context = torch.matmul(weights, v) context = unshape(context) context = self.out_lin(context) if output_attentions: return (context, weights) else: return (context,) class DistilBertFlashAttention2(MultiHeadSelfAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, ...]: (batch_size, q_length, dim) = query.size() dim_per_head = self.dim // self.n_heads def reshape(x: torch.Tensor) -> torch.Tensor: return x.view(batch_size, -1, self.n_heads, dim_per_head) query_states = reshape(self.q_lin(query)) key_states = reshape(self.k_lin(key)) value_states = reshape(self.v_lin(value)) attn_dropout = self.config.attention_dropout if self.training else 0.0 if query_states.dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_lin.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. 
We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_weights = _flash_attention_forward(query_states, key_states, value_states, mask, q_length, dropout=attn_dropout, use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal) attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head) attn_output = self.out_lin(attn_weights_reshaped) if output_attentions: return (attn_output, attn_weights) else: return (attn_output,) class FFN(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() self.dropout = nn.Dropout(p=config.dropout) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim) self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim) self.activation = get_activation(config.activation) def forward(self, input: torch.Tensor) -> torch.Tensor: return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input) def ff_chunk(self, input: torch.Tensor) -> torch.Tensor: x = self.lin1(input) x = self.activation(x) x = self.lin2(x) x = self.dropout(x) return x DISTILBERT_ATTENTION_CLASSES = {'eager': MultiHeadSelfAttention, 'flash_attention_2': DistilBertFlashAttention2} class TransformerBlock(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() if config.dim % config.n_heads != 0: raise ValueError(f'config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly') self.attention = DISTILBERT_ATTENTION_CLASSES[config._attn_implementation](config) self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12) self.ffn = FFN(config) self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12) def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, ...]: sa_output = self.attention(query=x, key=x, value=x, mask=attn_mask, head_mask=head_mask, output_attentions=output_attentions) if output_attentions: (sa_output, sa_weights) = sa_output else: if type(sa_output) is not tuple: raise TypeError(f'sa_output must be a tuple but it is {type(sa_output)} type') sa_output = sa_output[0] sa_output = self.sa_layer_norm(sa_output + x) ffn_output = self.ffn(sa_output) ffn_output: torch.Tensor = self.output_layer_norm(ffn_output + sa_output) output = (ffn_output,) if output_attentions: output = (sa_weights,) + output return output class Transformer(nn.Module): def __init__(self, config: PretrainedConfig): super().__init__() self.n_layers = config.n_layers self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)]) self.gradient_checkpointing = False def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: Optional[bool]=None) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_state = x for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) if self.gradient_checkpointing and self.training: layer_outputs = 
self._gradient_checkpointing_func(layer_module.__call__, hidden_state, attn_mask, head_mask[i], output_attentions) else: layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions) hidden_state = layer_outputs[-1] if output_attentions: if len(layer_outputs) != 2: raise ValueError(f'The length of the layer_outputs should be 2, but it is {len(layer_outputs)}') attentions = layer_outputs[0] all_attentions = all_attentions + (attentions,) elif len(layer_outputs) != 1: raise ValueError(f'The length of the layer_outputs should be 1, but it is {len(layer_outputs)}') if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) if not return_dict: return tuple((v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions) class DistilBertPreTrainedModel(PreTrainedModel): config_class = DistilBertConfig load_tf_weights = None base_model_prefix = 'distilbert' supports_gradient_checkpointing = True _supports_flash_attn_2 = True def _init_weights(self, module: nn.Module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, Embeddings) and self.config.sinusoidal_pos_embds: create_sinusoidal_embeddings(self.config.max_position_embeddings, self.config.dim, module.position_embeddings.weight) DISTILBERT_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' DISTILBERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.', DISTILBERT_START_DOCSTRING) class DistilBertModel(DistilBertPreTrainedModel): def __init__(self, config: PretrainedConfig): super().__init__(config) self.embeddings = Embeddings(config) self.transformer = Transformer(config) self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' self.post_init() def get_position_embeddings(self) -> nn.Embedding: return self.embeddings.position_embeddings def resize_position_embeddings(self, new_num_position_embeddings: int): num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings if num_position_embeds_diff == 0: return logger.info(f'Setting `config.max_position_embeddings={new_num_position_embeddings}`...') self.config.max_position_embeddings = new_num_position_embeddings old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone() self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim) if self.config.sinusoidal_pos_embds: create_sinusoidal_embeddings(n_pos=self.config.max_position_embeddings, dim=self.config.dim, out=self.position_embeddings.weight) else: with torch.no_grad(): if num_position_embeds_diff > 0: self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(old_position_embeddings_weight) else: self.embeddings.position_embeddings.weight = nn.Parameter(old_position_embeddings_weight[:num_position_embeds_diff]) self.embeddings.position_embeddings.to(self.device) def get_input_embeddings(self) -> nn.Embedding: return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings: nn.Embedding): self.embeddings.word_embeddings = new_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[List[int]]]): for (layer, heads) in heads_to_prune.items(): self.transformer.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, num_choices')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]: output_attentions = output_attentions if output_attentions is 
not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embeddings = self.embeddings(input_ids, inputs_embeds) if self._use_flash_attention_2: attention_mask = attention_mask if attention_mask is not None and 0 in attention_mask else None elif attention_mask is None: attention_mask = torch.ones(input_shape, device=device) return self.transformer(x=embeddings, attn_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) @add_start_docstrings('DistilBert Model with a `masked language modeling` head on top.', DISTILBERT_START_DOCSTRING) class DistilBertForMaskedLM(DistilBertPreTrainedModel): _tied_weights_keys = ['vocab_projector.weight'] def __init__(self, config: PretrainedConfig): super().__init__(config) self.activation = get_activation(config.activation) self.distilbert = DistilBertModel(config) self.vocab_transform = nn.Linear(config.dim, config.dim) self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12) self.vocab_projector = nn.Linear(config.dim, config.vocab_size) self.post_init() self.mlm_loss_fct = nn.CrossEntropyLoss() def get_position_embeddings(self) -> nn.Embedding: return self.distilbert.get_position_embeddings() def resize_position_embeddings(self, new_num_position_embeddings: int): self.distilbert.resize_position_embeddings(new_num_position_embeddings) def get_output_embeddings(self) -> nn.Module: return self.vocab_projector def set_output_embeddings(self, new_embeddings: nn.Module): self.vocab_projector = new_embeddings @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, num_choices')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict dlbrt_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = dlbrt_output[0] prediction_logits = self.vocab_transform(hidden_states) prediction_logits = self.activation(prediction_logits) prediction_logits = self.vocab_layer_norm(prediction_logits) prediction_logits = self.vocab_projector(prediction_logits) mlm_loss = None if labels 
is not None: mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1)) if not return_dict: output = (prediction_logits,) + dlbrt_output[1:] return (mlm_loss,) + output if mlm_loss is not None else output return MaskedLMOutput(loss=mlm_loss, logits=prediction_logits, hidden_states=dlbrt_output.hidden_states, attentions=dlbrt_output.attentions) @add_start_docstrings('\n DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ', DISTILBERT_START_DOCSTRING) class DistilBertForSequenceClassification(DistilBertPreTrainedModel): def __init__(self, config: PretrainedConfig): super().__init__(config) self.num_labels = config.num_labels self.config = config self.distilbert = DistilBertModel(config) self.pre_classifier = nn.Linear(config.dim, config.dim) self.classifier = nn.Linear(config.dim, config.num_labels) self.dropout = nn.Dropout(config.seq_classif_dropout) self.post_init() def get_position_embeddings(self) -> nn.Embedding: return self.distilbert.get_position_embeddings() def resize_position_embeddings(self, new_num_position_embeddings: int): self.distilbert.resize_position_embeddings(new_num_position_embeddings) @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict distilbert_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_state = distilbert_output[0] pooled_output = hidden_state[:, 0] pooled_output = self.pre_classifier(pooled_output) pooled_output = nn.ReLU()(pooled_output) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + distilbert_output[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=distilbert_output.hidden_states, 
attentions=distilbert_output.attentions) @add_start_docstrings('\n DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a\n linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', DISTILBERT_START_DOCSTRING) class DistilBertForQuestionAnswering(DistilBertPreTrainedModel): def __init__(self, config: PretrainedConfig): super().__init__(config) self.distilbert = DistilBertModel(config) self.qa_outputs = nn.Linear(config.dim, config.num_labels) if config.num_labels != 2: raise ValueError(f'config.num_labels should be 2, but it is {config.num_labels}') self.dropout = nn.Dropout(config.qa_dropout) self.post_init() def get_position_embeddings(self) -> nn.Embedding: return self.distilbert.get_position_embeddings() def resize_position_embeddings(self, new_num_position_embeddings: int): self.distilbert.resize_position_embeddings(new_num_position_embeddings) @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, num_choices')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[QuestionAnsweringModelOutput, Tuple[torch.Tensor, ...]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict distilbert_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = distilbert_output[0] hidden_states = self.dropout(hidden_states) logits = self.qa_outputs(hidden_states) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + distilbert_output[1:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions) @add_start_docstrings('\n DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.\n for Named-Entity-Recognition (NER) tasks.\n ', DISTILBERT_START_DOCSTRING) class DistilBertForTokenClassification(DistilBertPreTrainedModel): def __init__(self, config: PretrainedConfig): super().__init__(config) 
self.num_labels = config.num_labels self.distilbert = DistilBertModel(config) self.dropout = nn.Dropout(config.dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() def get_position_embeddings(self) -> nn.Embedding: return self.distilbert.get_position_embeddings() def resize_position_embeddings(self, new_num_position_embeddings: int): self.distilbert.resize_position_embeddings(new_num_position_embeddings) @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[TokenClassifierOutput, Tuple[torch.Tensor, ...]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.distilbert(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and\n a softmax) e.g. 
for RocStories/SWAG tasks.\n ', DISTILBERT_START_DOCSTRING) class DistilBertForMultipleChoice(DistilBertPreTrainedModel): def __init__(self, config: PretrainedConfig): super().__init__(config) self.distilbert = DistilBertModel(config) self.pre_classifier = nn.Linear(config.dim, config.dim) self.classifier = nn.Linear(config.dim, 1) self.dropout = nn.Dropout(config.seq_classif_dropout) self.post_init() def get_position_embeddings(self) -> nn.Embedding: return self.distilbert.get_position_embeddings() def resize_position_embeddings(self, new_num_position_embeddings: int): self.distilbert.resize_position_embeddings(new_num_position_embeddings) @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor, ...]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.distilbert(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_state = outputs[0] pooled_output = hidden_state[:, 0] pooled_output = self.pre_classifier(pooled_output) pooled_output = nn.ReLU()(pooled_output) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/distilbert/modeling_flax_distilbert.py import math from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxMaskedLMOutput, FlaxMultipleChoiceModelOutput, FlaxQuestionAnsweringModelOutput, FlaxSequenceClassifierOutput, FlaxTokenClassifierOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_distilbert import DistilBertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'distilbert-base-uncased' 
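# --- Editor's sketch (not part of the original module) ---------------------------------
# Minimal usage example for the Flax classes defined in this file, assuming the public
# 'distilbert-base-uncased' checkpoint ships Flax weights (otherwise pass `from_pt=True`)
# and that the top-level `transformers` exports are available. Defined as a helper
# function and never called at import time.
def _example_flax_distilbert_masked_lm():
    from transformers import AutoTokenizer, FlaxDistilBertForMaskedLM

    tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
    model = FlaxDistilBertForMaskedLM.from_pretrained('distilbert-base-uncased')
    inputs = tokenizer('The capital of France is [MASK].', return_tensors='np')
    # The forward pass returns a FlaxMaskedLMOutput; `logits` has shape
    # (batch_size, sequence_length, vocab_size).
    logits = model(**inputs).logits
    return logits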
_CONFIG_FOR_DOC = 'DistilBertConfig' FLAX_DISTILBERT_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading, saving and converting weights from PyTorch models)\n\n This model is also a\n [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as\n a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and\n behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' DISTILBERT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' def get_angles(pos, i, d_model): angle_rates = 1 / np.power(10000, 2 * (i // 2) / np.float32(d_model)) return pos * angle_rates def positional_encoding(position, d_model): angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model) angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2]) angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2]) pos_encoding = angle_rads[np.newaxis, ...] 
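# The resulting table has shape (1, position, d_model): sin at even feature indices,
# cos at odd ones. FlaxEmbeddings slices it per sequence length when
# `sinusoidal_pos_embds=True` instead of using a learned position embedding.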
return jnp.array(pos_encoding) class FlaxEmbeddings(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.word_embeddings = nn.Embed(self.config.vocab_size, self.config.dim, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) if not self.config.sinusoidal_pos_embds: self.position_embeddings = nn.Embed(self.config.max_position_embeddings, self.config.dim, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) else: self.pos_encoding = positional_encoding(self.config.max_position_embeddings, self.config.dim) self.LayerNorm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.dropout) def __call__(self, input_ids, deterministic: bool=True): (batch_size, seq_length) = input_ids.shape inputs_embeds = self.word_embeddings(input_ids.astype('i4')) if not self.config.sinusoidal_pos_embds: position_ids = jnp.arange(seq_length).astype('i4') position_ids = jnp.broadcast_to(position_ids, shape=(batch_size, seq_length)) position_embeds = self.position_embeddings(position_ids.astype('i4')) else: position_embeds = self.pos_encoding[:, :seq_length, :] position_embeds = position_embeds.astype(inputs_embeds.dtype) hidden_states = inputs_embeds + position_embeds hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxMultiHeadSelfAttention(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.n_heads = self.config.n_heads self.dim = self.config.dim self.dropout = nn.Dropout(rate=self.config.attention_dropout) if not self.dim % self.n_heads == 0: raise ValueError(f'Hidden size {self.dim} not dividable by number of heads {self.n_heads}') self.q_lin = nn.Dense(self.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.k_lin = nn.Dense(self.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.v_lin = nn.Dense(self.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.out_lin = nn.Dense(self.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) def __call__(self, query, key, value, mask, deterministic: bool=True, output_attentions: bool=False): (bs, q_len, dim) = query.shape k_len = key.shape[1] dim_per_head = self.dim // self.n_heads mask_reshp = (bs, 1, 1, k_len) def shape(x): return x.reshape(bs, -1, self.n_heads, dim_per_head).transpose(0, 2, 1, 3) def unshape(x): return x.transpose(0, 2, 1, 3).reshape(bs, -1, self.n_heads * dim_per_head) q = shape(self.q_lin(query)) k = shape(self.k_lin(key)) v = shape(self.v_lin(value)) q = q / math.sqrt(dim_per_head) scores = jnp.matmul(q, k.transpose(0, 1, 3, 2)) mask = jnp.reshape(mask, mask_reshp) mask = mask.astype(scores.dtype) scores = scores - 1e+30 * (1.0 - mask) weights = nn.softmax(scores, axis=-1) weights = self.dropout(weights, deterministic=deterministic) context = jnp.matmul(weights, v) context = unshape(context) context = self.out_lin(context) if output_attentions: return (context, weights) else: return (context,) class FlaxFFN(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dropout = nn.Dropout(rate=self.config.dropout) self.chunk_size_feed_forward = self.config.chunk_size_feed_forward self.seq_len_dim = 1 self.lin1 = nn.Dense(self.config.hidden_dim, 
dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.lin2 = nn.Dense(self.config.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.activation = ACT2FN[self.config.activation] def __call__(self, hidden_states, deterministic: bool=True): hidden_states = self.lin1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.lin2(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxTransformerBlock(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): assert self.config.dim % self.config.n_heads == 0, f'Hidden size {self.config.dim} not dividable by number of heads {self.config.n_heads}' self.attention = FlaxMultiHeadSelfAttention(self.config, dtype=self.dtype) self.sa_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype) self.ffn = FlaxFFN(self.config, dtype=self.dtype) self.output_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype) def __call__(self, hidden_states, attn_mask, output_attentions: bool=False, deterministic: bool=True): sa_output = self.attention(query=hidden_states, key=hidden_states, value=hidden_states, mask=attn_mask, output_attentions=output_attentions, deterministic=deterministic) if output_attentions: (sa_output, sa_weights) = sa_output else: assert type(sa_output) is tuple sa_output = sa_output[0] sa_output = self.sa_layer_norm(sa_output + hidden_states) ffn_output = self.ffn(sa_output, deterministic=deterministic) ffn_output = self.output_layer_norm(ffn_output + sa_output) output = (ffn_output,) if output_attentions: output = (sa_weights,) + output return output class FlaxTransformer(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [FlaxTransformerBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.n_layers)] def __call__(self, hidden_states, attention_mask, output_attentions: bool=False, output_hidden_states: bool=False, deterministic: bool=True, return_dict: bool=False): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for layer_module in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states=hidden_states, attn_mask=attention_mask, output_attentions=output_attentions, deterministic=deterministic) hidden_states = layer_outputs[-1] if output_attentions: assert len(layer_outputs) == 2 attentions = layer_outputs[0] all_attentions = all_attentions + (attentions,) else: assert len(layer_outputs) == 1 if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_attentions, all_hidden_states] if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) class FlaxTransformerEncoder(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.layer = FlaxTransformer(self.config, dtype=self.dtype) def __call__(self, hidden_states, attention_mask, output_attentions: bool=False, output_hidden_states: bool=False, deterministic: bool=True, return_dict: bool=False): return self.layer(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, deterministic=deterministic, 
return_dict=return_dict) class FlaxDistilBertLMDecoder(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.bias = self.param('bias', self.bias_init, (self.config.vocab_size,)) def __call__(self, inputs, kernel): inputs = jnp.asarray(inputs, self.dtype) kernel = jnp.asarray(kernel, self.dtype) y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ()))) bias = jnp.asarray(self.bias, self.dtype) y = y + bias return y class FlaxDistilBertPreTrainedModel(FlaxPreTrainedModel): config_class = DistilBertConfig base_model_prefix = 'distilbert' module_class: nn.Module = None def __init__(self, config: DistilBertConfig, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') attention_mask = jnp.ones_like(input_ids) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def __call__(self, input_ids, attention_mask=None, head_mask=None, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng return self.module.apply({'params': params or self.params}, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs) class FlaxDistilBertModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embeddings = FlaxEmbeddings(self.config, dtype=self.dtype) self.transformer = FlaxTransformerEncoder(self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict input_embeds = self.embeddings(input_ids, deterministic=deterministic) return 
self.transformer(hidden_states=input_embeds, attention_mask=attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) @add_start_docstrings('The bare DistilBert Model transformer outputting raw hidden-states without any specific head on top.', FLAX_DISTILBERT_START_DOCSTRING) class FlaxDistilBertModel(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertModule append_call_sample_docstring(FlaxDistilBertModel, _CHECKPOINT_FOR_DOC, None, _CONFIG_FOR_DOC) class FlaxDistilBertForMaskedLMModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.distilbert = FlaxDistilBertModule(self.config, dtype=self.dtype) self.vocab_transform = nn.Dense(self.config.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.vocab_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype) if self.config.tie_word_embeddings: self.vocab_projector = FlaxDistilBertLMDecoder(self.config, dtype=self.dtype) else: self.vocab_projector = nn.Dense(self.config.vocab_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) def __call__(self, input_ids, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return_dict = return_dict if return_dict is not None else self.config.use_return_dict dlbrt_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, deterministic=deterministic, return_dict=return_dict) hidden_states = dlbrt_output[0] prediction_logits = self.vocab_transform(hidden_states) prediction_logits = ACT2FN[self.config.activation](prediction_logits) prediction_logits = self.vocab_layer_norm(prediction_logits) if self.config.tie_word_embeddings: shared_embedding = self.distilbert.variables['params']['embeddings']['word_embeddings']['embedding'] prediction_logits = self.vocab_projector(prediction_logits, shared_embedding.T) else: prediction_logits = self.vocab_projector(prediction_logits) if not return_dict: output = (prediction_logits,) + dlbrt_output[1:] return output return FlaxMaskedLMOutput(logits=prediction_logits, hidden_states=dlbrt_output.hidden_states, attentions=dlbrt_output.attentions) @add_start_docstrings('DistilBert Model with a `language modeling` head on top.', FLAX_DISTILBERT_START_DOCSTRING) class FlaxDistilBertForMaskedLM(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForMaskedLMModule append_call_sample_docstring(FlaxDistilBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC) class FlaxDistilBertForSequenceClassificationModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype) self.pre_classifier = nn.Dense(self.config.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, input_ids, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return_dict = return_dict if return_dict is not None else self.config.use_return_dict distilbert_output = self.distilbert(input_ids, 
attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_state = distilbert_output[0] pooled_output = hidden_state[:, 0] pooled_output = self.pre_classifier(pooled_output) pooled_output = ACT2FN['relu'](pooled_output) pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) if not return_dict: return (logits,) + distilbert_output[1:] return FlaxSequenceClassifierOutput(logits=logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions) @add_start_docstrings('\n DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ', FLAX_DISTILBERT_START_DOCSTRING) class FlaxDistilBertForSequenceClassification(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForSequenceClassificationModule append_call_sample_docstring(FlaxDistilBertForSequenceClassification, _CHECKPOINT_FOR_DOC, FlaxSequenceClassifierOutput, _CONFIG_FOR_DOC) class FlaxDistilBertForMultipleChoiceModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype) self.pre_classifier = nn.Dense(self.config.dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__(self, input_ids, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None outputs = self.distilbert(input_ids, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_state = outputs[0] pooled_output = hidden_state[:, 0] pooled_output = self.pre_classifier(pooled_output) pooled_output = ACT2FN['relu'](pooled_output) pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput(logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and\n a softmax) e.g. 
for RocStories/SWAG tasks.\n ', FLAX_DISTILBERT_START_DOCSTRING) class FlaxDistilBertForMultipleChoice(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForMultipleChoiceModule overwrite_call_docstring(FlaxDistilBertForMultipleChoice, DISTILBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) append_call_sample_docstring(FlaxDistilBertForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC) class FlaxDistilBertForTokenClassificationModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, input_ids, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.distilbert(input_ids, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.\n for Named-Entity-Recognition (NER) tasks.\n ', FLAX_DISTILBERT_START_DOCSTRING) class FlaxDistilBertForTokenClassification(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForTokenClassificationModule append_call_sample_docstring(FlaxDistilBertForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC) class FlaxDistilBertForQuestionAnsweringModule(nn.Module): config: DistilBertConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) assert self.config.num_labels == 2 self.dropout = nn.Dropout(rate=self.config.qa_dropout) def __call__(self, input_ids, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return_dict = return_dict if return_dict is not None else self.config.use_return_dict distilbert_output = self.distilbert(input_ids, attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = distilbert_output[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.qa_outputs(hidden_states) (start_logits, end_logits) = logits.split(self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + distilbert_output[1:] return FlaxQuestionAnsweringModelOutput(start_logits=start_logits, end_logits=end_logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions) @add_start_docstrings('\n DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a\n linear layers on top of the 
hidden-states output to compute `span start logits` and `span end logits`).\n ', FLAX_DISTILBERT_START_DOCSTRING) class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel): module_class = FlaxDistilBertForQuestionAnsweringModule append_call_sample_docstring(FlaxDistilBertForQuestionAnswering, _CHECKPOINT_FOR_DOC, FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC) # File: transformers-main/src/transformers/models/distilbert/modeling_tf_distilbert.py """""" from __future__ import annotations import warnings from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput from ...modeling_tf_utils import TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_distilbert import DistilBertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'distilbert-base-uncased' _CONFIG_FOR_DOC = 'DistilBertConfig' class TFEmbeddings(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.dim = config.dim self.initializer_range = config.initializer_range self.max_position_embeddings = config.max_position_embeddings self.LayerNorm = keras.layers.LayerNormalization(epsilon=1e-12, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.dropout) def build(self, input_shape=None): with tf.name_scope('word_embeddings'): self.weight = self.add_weight(name='weight', shape=[self.config.vocab_size, self.dim], initializer=get_initializer(initializer_range=self.initializer_range)) with tf.name_scope('position_embeddings'): self.position_embeddings = self.add_weight(name='embeddings', shape=[self.max_position_embeddings, self.dim], initializer=get_initializer(initializer_range=self.initializer_range)) if self.built: return self.built = True if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.dim]) def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False): assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) final_embeddings = inputs_embeds + position_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFMultiHeadSelfAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.n_heads = config.n_heads self.dim = config.dim self.dropout = keras.layers.Dropout(config.attention_dropout) self.output_attentions = config.output_attentions assert 
self.dim % self.n_heads == 0, f'Hidden size {self.dim} not dividable by number of heads {self.n_heads}' self.q_lin = keras.layers.Dense(config.dim, kernel_initializer=get_initializer(config.initializer_range), name='q_lin') self.k_lin = keras.layers.Dense(config.dim, kernel_initializer=get_initializer(config.initializer_range), name='k_lin') self.v_lin = keras.layers.Dense(config.dim, kernel_initializer=get_initializer(config.initializer_range), name='v_lin') self.out_lin = keras.layers.Dense(config.dim, kernel_initializer=get_initializer(config.initializer_range), name='out_lin') self.pruned_heads = set() self.config = config def prune_heads(self, heads): raise NotImplementedError def call(self, query, key, value, mask, head_mask, output_attentions, training=False): (bs, q_length, dim) = shape_list(query) k_length = shape_list(key)[1] dim_per_head = int(self.dim / self.n_heads) dim_per_head = tf.cast(dim_per_head, dtype=tf.int32) mask_reshape = [bs, 1, 1, k_length] def shape(x): return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3)) def unshape(x): return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head)) q = shape(self.q_lin(query)) k = shape(self.k_lin(key)) v = shape(self.v_lin(value)) q = tf.cast(q, dtype=tf.float32) q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32))) k = tf.cast(k, dtype=q.dtype) scores = tf.matmul(q, k, transpose_b=True) mask = tf.reshape(mask, mask_reshape) mask = tf.cast(mask, dtype=scores.dtype) scores = scores - 1e+30 * (1.0 - mask) weights = stable_softmax(scores, axis=-1) weights = self.dropout(weights, training=training) if head_mask is not None: weights = weights * head_mask context = tf.matmul(weights, v) context = unshape(context) context = self.out_lin(context) if output_attentions: return (context, weights) else: return (context,) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'q_lin', None) is not None: with tf.name_scope(self.q_lin.name): self.q_lin.build([None, None, self.config.dim]) if getattr(self, 'k_lin', None) is not None: with tf.name_scope(self.k_lin.name): self.k_lin.build([None, None, self.config.dim]) if getattr(self, 'v_lin', None) is not None: with tf.name_scope(self.v_lin.name): self.v_lin.build([None, None, self.config.dim]) if getattr(self, 'out_lin', None) is not None: with tf.name_scope(self.out_lin.name): self.out_lin.build([None, None, self.config.dim]) class TFFFN(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dropout = keras.layers.Dropout(config.dropout) self.lin1 = keras.layers.Dense(config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name='lin1') self.lin2 = keras.layers.Dense(config.dim, kernel_initializer=get_initializer(config.initializer_range), name='lin2') self.activation = get_tf_activation(config.activation) self.config = config def call(self, input, training=False): x = self.lin1(input) x = self.activation(x) x = self.lin2(x) x = self.dropout(x, training=training) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'lin1', None) is not None: with tf.name_scope(self.lin1.name): self.lin1.build([None, None, self.config.dim]) if getattr(self, 'lin2', None) is not None: with tf.name_scope(self.lin2.name): self.lin2.build([None, None, self.config.hidden_dim]) class TFTransformerBlock(keras.layers.Layer): def __init__(self, config, **kwargs): 
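# One DistilBERT layer: multi-head self-attention followed by a feed-forward block,
# each wrapped with a residual connection and post-LayerNorm
# (sa_layer_norm / output_layer_norm below), mirroring the PyTorch and Flax blocks.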
super().__init__(**kwargs) self.n_heads = config.n_heads self.dim = config.dim self.hidden_dim = config.hidden_dim self.dropout = keras.layers.Dropout(config.dropout) self.activation = config.activation self.output_attentions = config.output_attentions assert config.dim % config.n_heads == 0, f'Hidden size {config.dim} not dividable by number of heads {config.n_heads}' self.attention = TFMultiHeadSelfAttention(config, name='attention') self.sa_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name='sa_layer_norm') self.ffn = TFFFN(config, name='ffn') self.output_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name='output_layer_norm') self.config = config def call(self, x, attn_mask, head_mask, output_attentions, training=False): sa_output = self.attention(x, x, x, attn_mask, head_mask, output_attentions, training=training) if output_attentions: (sa_output, sa_weights) = sa_output else: sa_output = sa_output[0] sa_output = self.sa_layer_norm(sa_output + x) ffn_output = self.ffn(sa_output, training=training) ffn_output = self.output_layer_norm(ffn_output + sa_output) output = (ffn_output,) if output_attentions: output = (sa_weights,) + output return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'attention', None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, 'sa_layer_norm', None) is not None: with tf.name_scope(self.sa_layer_norm.name): self.sa_layer_norm.build([None, None, self.config.dim]) if getattr(self, 'ffn', None) is not None: with tf.name_scope(self.ffn.name): self.ffn.build(None) if getattr(self, 'output_layer_norm', None) is not None: with tf.name_scope(self.output_layer_norm.name): self.output_layer_norm.build([None, None, self.config.dim]) class TFTransformer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.n_layers = config.n_layers self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.layer = [TFTransformerBlock(config, name=f'layer_._{i}') for i in range(config.n_layers)] def call(self, x, attn_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_state = x for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions, training=training) hidden_state = layer_outputs[-1] if output_attentions: assert len(layer_outputs) == 2 attentions = layer_outputs[0] all_attentions = all_attentions + (attentions,) else: assert len(layer_outputs) == 1, f'Incorrect number of outputs {len(layer_outputs)} instead of 1' if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_state,) if not return_dict: return tuple((v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layer', None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFDistilBertMainLayer(keras.layers.Layer): config_class = DistilBertConfig def __init__(self, config, **kwargs): 
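# Keras-serializable backbone shared by all TF DistilBERT heads: token plus learned
# position embeddings followed by the TFTransformer stack. No pooler is defined here;
# each head pools the first token ([CLS]) of the last hidden state itself.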
super().__init__(**kwargs) self.config = config self.num_hidden_layers = config.num_hidden_layers self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFEmbeddings(config, name='embeddings') self.transformer = TFTransformer(config, name='transformer') def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = value.shape[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if attention_mask is None: attention_mask = tf.ones(input_shape) attention_mask = tf.cast(attention_mask, dtype=tf.float32) if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds) tfmr_output = self.transformer(embedding_output, attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=training) return tfmr_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) class TFDistilBertPreTrainedModel(TFPreTrainedModel): config_class = DistilBertConfig base_model_prefix = 'distilbert' DISTILBERT_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Parameters:\n config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' DISTILBERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @add_start_docstrings('The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.', DISTILBERT_START_DOCSTRING) class TFDistilBertModel(TFDistilBertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.distilbert = TFDistilBertMainLayer(config, name='distilbert') @unpack_inputs @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: outputs = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'distilbert', None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) class TFDistilBertLMHead(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.dim = config.dim self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer='zeros', trainable=True, name='bias') super().build(input_shape) def get_output_embeddings(self): return self.input_embeddings def set_output_embeddings(self, value): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self): return {'bias': self.bias} def set_bias(self, value): self.bias = value['bias'] self.config.vocab_size = shape_list(value['bias'])[0] def call(self, hidden_states): seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.dim]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states @add_start_docstrings('DistilBert Model with a `masked language modeling` head on top.', DISTILBERT_START_DOCSTRING) class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.distilbert = TFDistilBertMainLayer(config, name='distilbert') self.vocab_transform = keras.layers.Dense(config.dim, kernel_initializer=get_initializer(config.initializer_range), name='vocab_transform') self.act = get_tf_activation(config.activation) self.vocab_layer_norm = 
keras.layers.LayerNormalization(epsilon=1e-12, name='vocab_layer_norm') self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name='vocab_projector') def get_lm_head(self): return self.vocab_projector def get_prefix_bias_name(self): warnings.warn('The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.', FutureWarning) return self.name + '/' + self.vocab_projector.name @unpack_inputs @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: distilbert_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) hidden_states = distilbert_output[0] prediction_logits = self.vocab_transform(hidden_states) prediction_logits = self.act(prediction_logits) prediction_logits = self.vocab_layer_norm(prediction_logits) prediction_logits = self.vocab_projector(prediction_logits) loss = None if labels is None else self.hf_compute_loss(labels, prediction_logits) if not return_dict: output = (prediction_logits,) + distilbert_output[1:] return (loss,) + output if loss is not None else output return TFMaskedLMOutput(loss=loss, logits=prediction_logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'distilbert', None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) if getattr(self, 'vocab_transform', None) is not None: with tf.name_scope(self.vocab_transform.name): self.vocab_transform.build([None, None, self.config.dim]) if getattr(self, 'vocab_layer_norm', None) is not None: with tf.name_scope(self.vocab_layer_norm.name): self.vocab_layer_norm.build([None, None, self.config.dim]) if getattr(self, 'vocab_projector', None) is not None: with tf.name_scope(self.vocab_projector.name): self.vocab_projector.build(None) @add_start_docstrings('\n DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. 
for GLUE tasks.\n ', DISTILBERT_START_DOCSTRING) class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.distilbert = TFDistilBertMainLayer(config, name='distilbert') self.pre_classifier = keras.layers.Dense(config.dim, kernel_initializer=get_initializer(config.initializer_range), activation='relu', name='pre_classifier') self.classifier = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.dropout = keras.layers.Dropout(config.seq_classif_dropout) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: distilbert_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) hidden_state = distilbert_output[0] pooled_output = hidden_state[:, 0] pooled_output = self.pre_classifier(pooled_output) pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + distilbert_output[1:] return (loss,) + output if loss is not None else output return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'distilbert', None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) if getattr(self, 'pre_classifier', None) is not None: with tf.name_scope(self.pre_classifier.name): self.pre_classifier.build([None, None, self.config.dim]) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.dim]) @add_start_docstrings('\n DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.\n for Named-Entity-Recognition (NER) tasks.\n ', DISTILBERT_START_DOCSTRING) class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.distilbert = TFDistilBertMainLayer(config, name='distilbert') self.dropout = keras.layers.Dropout(config.dropout) self.classifier = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs 
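# `unpack_inputs` (above) normalizes Keras-style packed inputs into keyword arguments;
# the decorators below attach the shared input docstring and a token-classification
# code sample to `call`.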
@add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: outputs = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'distilbert', None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and\n a softmax) e.g. 
for RocStories/SWAG tasks.\n ', DISTILBERT_START_DOCSTRING) class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.distilbert = TFDistilBertMainLayer(config, name='distilbert') self.dropout = keras.layers.Dropout(config.seq_classif_dropout) self.pre_classifier = keras.layers.Dense(config.dim, kernel_initializer=get_initializer(config.initializer_range), activation='relu', name='pre_classifier') self.classifier = keras.layers.Dense(1, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_inputs_embeds = tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None distilbert_output = self.distilbert(flat_input_ids, flat_attention_mask, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training) hidden_state = distilbert_output[0] pooled_output = hidden_state[:, 0] pooled_output = self.pre_classifier(pooled_output) pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + distilbert_output[1:] return (loss,) + output if loss is not None else output return TFMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'distilbert', None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) if getattr(self, 'pre_classifier', None) is not None: with tf.name_scope(self.pre_classifier.name): self.pre_classifier.build([None, None, self.config.dim]) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.dim]) @add_start_docstrings('\n DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a\n linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', DISTILBERT_START_DOCSTRING) class 
TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.distilbert = TFDistilBertMainLayer(config, name='distilbert') self.qa_outputs = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs') assert config.num_labels == 2, f'Incorrect number of labels {config.num_labels} instead of 2' self.dropout = keras.layers.Dropout(config.qa_dropout) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, start_positions: np.ndarray | tf.Tensor | None=None, end_positions: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: distilbert_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) hidden_states = distilbert_output[0] hidden_states = self.dropout(hidden_states, training=training) logits = self.qa_outputs(hidden_states) (start_logits, end_logits) = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {'start_position': start_positions} labels['end_position'] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + distilbert_output[1:] return (loss,) + output if loss is not None else output return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'distilbert', None) is not None: with tf.name_scope(self.distilbert.name): self.distilbert.build(None) if getattr(self, 'qa_outputs', None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.dim]) # File: transformers-main/src/transformers/models/distilbert/tokenization_distilbert.py """""" import collections import os import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} def load_vocab(vocab_file): vocab = collections.OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as reader: tokens = reader.readlines() for (index, token) in enumerate(tokens): token = token.rstrip('\n') vocab[token] = index return vocab def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens class 
DistilBertTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = DistilBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text, split_special_tokens=False): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): 
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: for (token, token_index) in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,) class BasicTokenizer: def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) unicode_normalized_text = unicodedata.normalize('NFC', text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return ''.join(output) def _is_chinese_char(self, cp): if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103): return True return False def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 65533 or _is_control(char): continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output) class WordpieceTokenizer: def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: 
output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = ''.join(chars[start:end]) if start > 0: substr = '##' + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens # File: transformers-main/src/transformers/models/distilbert/tokenization_distilbert_fast.py """""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} class DistilBertTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = DistilBertTokenizer def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars: normalizer_class = getattr(normalizers, normalizer_state.pop('type')) normalizer_state['lowercase'] = do_lower_case normalizer_state['strip_accents'] = strip_accents normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) # File: transformers-main/src/transformers/models/dit/convert_dit_unilm_to_pytorch.py """""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = 
logging.get_logger(__name__) def create_rename_keys(config, has_lm_head=False, is_semantic=False): prefix = 'backbone.' if is_semantic else '' rename_keys = [] for i in range(config.num_hidden_layers): rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight')) rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias')) rename_keys.append((f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight')) rename_keys.append((f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias')) rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight')) rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias')) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight')) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias')) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight')) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias')) rename_keys.extend([(f'{prefix}cls_token', 'beit.embeddings.cls_token'), (f'{prefix}patch_embed.proj.weight', 'beit.embeddings.patch_embeddings.projection.weight'), (f'{prefix}patch_embed.proj.bias', 'beit.embeddings.patch_embeddings.projection.bias'), (f'{prefix}pos_embed', 'beit.embeddings.position_embeddings')]) if has_lm_head: rename_keys.extend([('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias')]) else: rename_keys.extend([('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias')]) return rename_keys def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False): for i in range(config.num_hidden_layers): prefix = 'backbone.' 
if is_semantic else '' in_proj_weight = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight') q_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias') v_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias') state_dict[f'beit.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[:config.hidden_size, :] state_dict[f'beit.encoder.layer.{i}.attention.attention.query.bias'] = q_bias state_dict[f'beit.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size:config.hidden_size * 2, :] state_dict[f'beit.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size:, :] state_dict[f'beit.encoder.layer.{i}.attention.attention.value.bias'] = v_bias gamma_1 = state_dict.pop(f'{prefix}blocks.{i}.gamma_1') gamma_2 = state_dict.pop(f'{prefix}blocks.{i}.gamma_2') state_dict[f'beit.encoder.layer.{i}.lambda_1'] = gamma_1 state_dict[f'beit.encoder.layer.{i}.lambda_2'] = gamma_2 def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False): has_lm_head = False if 'rvlcdip' in checkpoint_url else True config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head) if 'large' in checkpoint_url or 'dit-l' in checkpoint_url: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 if 'rvlcdip' in checkpoint_url: config.num_labels = 16 repo_id = 'huggingface/label-files' filename = 'rvlcdip-id2label.json' id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} config.id2label = id2label config.label2id = {v: k for (k, v) in id2label.items()} state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model'] rename_keys = create_rename_keys(config, has_lm_head=has_lm_head) for (src, dest) in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head) model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config) model.eval() model.load_state_dict(state_dict) image_processor = BeitImageProcessor(size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False) image = prepare_img() encoding = image_processor(images=image, return_tensors='pt') pixel_values = encoding['pixel_values'] outputs = model(pixel_values) logits = outputs.logits expected_shape = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(expected_shape), 'Shape of logits not as expected' Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) print(f'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: if has_lm_head: model_name = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: model_name = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub(repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True) 
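# A minimal, self-contained sketch (toy values, random tensors) of the fused-qkv slicing
# convention used by read_in_q_k_v above: the checkpoint stores one fused projection of
# shape (3 * hidden_size, hidden_size) whose rows are [query; key; value], and the
# conversion recovers the three matrices by slicing along the first dimension. The
# hidden size of 8 below is an arbitrary illustration, not the real BEiT/DiT value.
import torch

toy_hidden_size = 8
fused_qkv_weight = torch.randn(3 * toy_hidden_size, toy_hidden_size)  # rows are [q; k; v]
query_weight = fused_qkv_weight[:toy_hidden_size, :]
key_weight = fused_qkv_weight[toy_hidden_size:toy_hidden_size * 2, :]
value_weight = fused_qkv_weight[-toy_hidden_size:, :]
assert query_weight.shape == key_weight.shape == value_weight.shape == (toy_hidden_size, toy_hidden_size)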
model.push_to_hub(repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint_url', default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth', type=str, help='URL to the original PyTorch checkpoint (.pth file).') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.') parser.add_argument('--push_to_hub', action='store_true') args = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/donut/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_donut_swin': ['DonutSwinConfig'], 'processing_donut': ['DonutProcessor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_donut_swin'] = ['DonutSwinModel', 'DonutSwinPreTrainedModel'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['feature_extraction_donut'] = ['DonutFeatureExtractor'] _import_structure['image_processing_donut'] = ['DonutImageProcessor'] if TYPE_CHECKING: from .configuration_donut_swin import DonutSwinConfig from .processing_donut import DonutProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_donut_swin import DonutSwinModel, DonutSwinPreTrainedModel try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_donut import DonutFeatureExtractor from .image_processing_donut import DonutImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/donut/configuration_donut_swin.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class DonutSwinConfig(PretrainedConfig): model_type = 'donut-swin' attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, **kwargs): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range 
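# Standalone usage sketch for the DonutSwinConfig defaults shown in this __init__
# (assumes only that the `transformers` package is installed; nothing is downloaded,
# the values checked are simply the defaults above).
from transformers import DonutSwinConfig

donut_swin_config = DonutSwinConfig()
assert donut_swin_config.num_layers == len(donut_swin_config.depths) == 4    # depths=[2, 2, 6, 2]
assert donut_swin_config.num_hidden_layers == donut_swin_config.num_layers   # via attribute_map
assert donut_swin_config.num_attention_heads == donut_swin_config.num_heads  # via attribute_map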
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) # File: transformers-main/src/transformers/models/donut/convert_donut_to_pytorch.py """""" import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast def get_configs(model): original_config = model.config encoder_config = DonutSwinConfig(image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128) decoder_config = MBartConfig(is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(model.decoder.tokenizer), scale_embedding=True, add_final_layer_norm=True) return (encoder_config, decoder_config) def rename_key(name): if 'encoder.model' in name: name = name.replace('encoder.model', 'encoder') if 'decoder.model' in name: name = name.replace('decoder.model', 'decoder') if 'patch_embed.proj' in name: name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection') if 'patch_embed.norm' in name: name = name.replace('patch_embed.norm', 'embeddings.norm') if name.startswith('encoder'): if 'layers' in name: name = 'encoder.' + name if 'attn.proj' in name: name = name.replace('attn.proj', 'attention.output.dense') if 'attn' in name and 'mask' not in name: name = name.replace('attn', 'attention.self') if 'norm1' in name: name = name.replace('norm1', 'layernorm_before') if 'norm2' in name: name = name.replace('norm2', 'layernorm_after') if 'mlp.fc1' in name: name = name.replace('mlp.fc1', 'intermediate.dense') if 'mlp.fc2' in name: name = name.replace('mlp.fc2', 'output.dense') if name == 'encoder.norm.weight': name = 'encoder.layernorm.weight' if name == 'encoder.norm.bias': name = 'encoder.layernorm.bias' return name def convert_state_dict(orig_state_dict, model): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if 'qkv' in key: key_split = key.split('.') layer_num = int(key_split[3]) block_num = int(key_split[5]) dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if 'weight' in key: orig_state_dict[f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :] orig_state_dict[f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[dim:dim * 2, :] orig_state_dict[f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :] else: orig_state_dict[f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[:dim] orig_state_dict[f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[dim:dim * 2] orig_state_dict[f'encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[-dim:] elif 'attn_mask' in key or key in ['encoder.model.norm.weight', 'encoder.model.norm.bias']: pass else: orig_state_dict[rename_key(key)] = val return orig_state_dict def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): original_model = DonutModel.from_pretrained(model_name).eval() (encoder_config, decoder_config) = get_configs(original_model) encoder = DonutSwinModel(encoder_config) decoder = 
MBartForCausalLM(decoder_config) model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder) model.eval() state_dict = original_model.state_dict() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) dataset = load_dataset('hf-internal-testing/example-documents') image = dataset['test'][0]['image'].convert('RGB') tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True) image_processor = DonutImageProcessor(do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]) processor = DonutProcessor(image_processor, tokenizer) pixel_values = processor(image, return_tensors='pt').pixel_values if model_name == 'naver-clova-ix/donut-base-finetuned-docvqa': task_prompt = '{user_input}' question = 'When is the coffee break?' task_prompt = task_prompt.replace('{user_input}', question) elif model_name == 'naver-clova-ix/donut-base-finetuned-rvlcdip': task_prompt = '' elif model_name in ['naver-clova-ix/donut-base-finetuned-cord-v1', 'naver-clova-ix/donut-base-finetuned-cord-v1-2560']: task_prompt = '' elif model_name == 'naver-clova-ix/donut-base-finetuned-cord-v2': task_prompt = 's_cord-v2>' elif model_name == 'naver-clova-ix/donut-base-finetuned-zhtrainticket': task_prompt = '' elif model_name in ['naver-clova-ix/donut-proto', 'naver-clova-ix/donut-base']: task_prompt = 'hello world' else: raise ValueError('Model name not supported') prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors='pt')['input_ids'] original_patch_embed = original_model.encoder.model.patch_embed(pixel_values) (patch_embeddings, _) = model.encoder.embeddings(pixel_values) assert torch.allclose(original_patch_embed, patch_embeddings, atol=0.001) original_last_hidden_state = original_model.encoder(pixel_values) last_hidden_state = model.encoder(pixel_values).last_hidden_state assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=0.01) original_logits = original_model(pixel_values, prompt_tensors, None).logits logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits assert torch.allclose(original_logits, logits, atol=0.001) print('Looks ok!') if pytorch_dump_folder_path is not None: print(f'Saving model and processor to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub('nielsr/' + model_name.split('/')[-1], commit_message='Update model') processor.push_to_hub('nielsr/' + model_name.split('/')[-1], commit_message='Update model') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='naver-clova-ix/donut-base-finetuned-docvqa', required=False, type=str, help="Name of the original model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, required=False, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub.') args = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/donut/feature_extraction_donut.py """""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor logger = logging.get_logger(__name__) class DonutFeatureExtractor(DonutImageProcessor): def 
__init__(self, *args, **kwargs) -> None: warnings.warn('The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use DonutImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs) # File: transformers-main/src/transformers/models/donut/image_processing_donut.py """""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import get_resize_output_image_size, pad, resize, to_channel_dimension_format from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, logging from ...utils.import_utils import is_vision_available logger = logging.get_logger(__name__) if is_vision_available(): import PIL class DonutImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_thumbnail: bool=True, do_align_long_axis: bool=False, do_pad: bool=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 2560, 'width': 1920} if isinstance(size, (tuple, list)): size = size[::-1] size = get_size_dict(size) self.do_resize = do_resize self.size = size self.resample = resample self.do_thumbnail = do_thumbnail self.do_align_long_axis = do_align_long_axis self.do_pad = do_pad self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def align_long_axis(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: (input_height, input_width) = get_image_size(image, channel_dim=input_data_format) (output_height, output_width) = (size['height'], size['width']) if output_width < output_height and input_width > input_height or (output_width > output_height and input_width < input_height): image = np.rot90(image, 3) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def pad_image(self, image: np.ndarray, size: Dict[str, int], random_padding: bool=False, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: (output_height, output_width) = (size['height'], size['width']) (input_height, input_width) = get_image_size(image, channel_dim=input_data_format) delta_width = output_width - input_width delta_height = output_height - input_height if random_padding: pad_top = np.random.randint(low=0, high=delta_height + 1) pad_left = np.random.randint(low=0, high=delta_width + 1) else: pad_top = delta_height // 2 pad_left = delta_width // 2 pad_bottom = delta_height - pad_top pad_right = delta_width - pad_left padding = 
((pad_top, pad_bottom), (pad_left, pad_right)) return pad(image, padding, data_format=data_format, input_data_format=input_data_format) def pad(self, *args, **kwargs): logger.info('pad is deprecated and will be removed in version 4.27. Please use pad_image instead.') return self.pad_image(*args, **kwargs) def thumbnail(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: (input_height, input_width) = get_image_size(image, channel_dim=input_data_format) (output_height, output_width) = (size['height'], size['width']) height = min(input_height, output_height) width = min(input_width, output_width) if height == input_height and width == input_width: return image if input_height > input_width: width = int(input_width * height / input_height) elif input_width > input_height: height = int(input_height * width / input_width) return resize(image, size=(height, width), resample=resample, reducing_gap=2.0, data_format=data_format, input_data_format=input_data_format, **kwargs) def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size) shortest_edge = min(size['height'], size['width']) output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format) resized_image = resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) return resized_image @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_thumbnail: bool=None, do_align_long_axis: bool=None, do_pad: bool=None, random_padding: bool=False, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size if isinstance(size, (tuple, list)): size = size[::-1] size = get_size_dict(size) resample = resample if resample is not None else self.resample do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis do_pad = do_pad if do_pad is not None else self.do_pad do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=size, do_resize=do_resize, size=size, resample=resample) images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_align_long_axis: images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images] if do_resize: images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images] if do_thumbnail: images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images] if do_pad: images = [self.pad_image(image=image, size=size, random_padding=random_padding, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors) # File: transformers-main/src/transformers/models/donut/modeling_donut_swin.py """""" import collections.abc import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, torch_int from .configuration_donut_swin import DonutSwinConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'DonutSwinConfig' _CHECKPOINT_FOR_DOC = 'https://huggingface.co/naver-clova-ix/donut-base' _EXPECTED_OUTPUT_SHAPE = [1, 49, 768] @dataclass class DonutSwinEncoderOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class DonutSwinModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None def window_partition(input_feature, window_size): (batch_size, height, width, num_channels) = input_feature.shape input_feature = input_feature.view(batch_size, height // window_size, window_size, width // window_size, window_size, num_channels) windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return 
windows def window_reverse(windows, window_size, height, width): num_channels = windows.shape[-1] windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels) windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels) return windows class DonutSwinEmbeddings(nn.Module): def __init__(self, config, use_mask_token=False): super().__init__() self.patch_embeddings = DonutSwinPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.patch_grid = self.patch_embeddings.grid_size self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None if config.use_absolute_embeddings: self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim)) else: self.position_embeddings = None self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.patch_size = config.patch_size self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> Tuple[torch.Tensor]: (_, num_channels, height, width) = pixel_values.shape (embeddings, output_dimensions) = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) (batch_size, seq_len, _) = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask if self.position_embeddings is not None: if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return (embeddings, output_dimensions) class DonutSwinPatchEmbeddings(nn.Module): def __init__(self, config): super().__init__() (image_size, patch_size) = (config.image_size, config.patch_size) (num_channels, hidden_size) = (config.num_channels, config.embed_dim) image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.grid_size = (image_size[0] // patch_size[0], image_size[1] // 
patch_size[1]) self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def maybe_pad(self, pixel_values, height, width): if width % self.patch_size[1] != 0: pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) pixel_values = nn.functional.pad(pixel_values, pad_values) if height % self.patch_size[0] != 0: pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) pixel_values = nn.functional.pad(pixel_values, pad_values) return pixel_values def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]: (_, num_channels, height, width) = pixel_values.shape pixel_values = self.maybe_pad(pixel_values, height, width) embeddings = self.projection(pixel_values) (_, _, height, width) = embeddings.shape output_dimensions = (height, width) embeddings = embeddings.flatten(2).transpose(1, 2) return (embeddings, output_dimensions) class DonutSwinPatchMerging(nn.Module): def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def maybe_pad(self, input_feature, height, width): should_pad = height % 2 == 1 or width % 2 == 1 if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: (height, width) = input_dimensions (batch_size, dim, num_channels) = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) input_feature = self.maybe_pad(input_feature, height, width) input_feature_0 = input_feature[:, 0::2, 0::2, :] input_feature_1 = input_feature[:, 1::2, 0::2, :] input_feature_2 = input_feature[:, 0::2, 1::2, :] input_feature_3 = input_feature[:, 1::2, 1::2, :] input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) input_feature = self.norm(input_feature) input_feature = self.reduction(input_feature) return input_feature def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor: if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() output = input.div(keep_prob) * random_tensor return output class DonutSwinDropPath(nn.Module): def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class DonutSwinSelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() if dim % num_heads != 0: raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})') self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) 
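# Self-contained round-trip sketch for the window_partition / window_reverse helpers
# defined earlier in this module: a toy 1x8x8x3 feature map is cut into 4x4 windows and
# reassembled without loss. The helpers are re-declared locally (same reshapes/permutes
# as above) so the snippet runs on its own; the sizes are arbitrary example values.
import torch

def _window_partition(x, window_size):
    b, h, w, c = x.shape
    x = x.view(b, h // window_size, window_size, w // window_size, window_size, c)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c)

def _window_reverse(windows, window_size, height, width):
    c = windows.shape[-1]
    x = windows.view(-1, height // window_size, width // window_size, window_size, window_size, c)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, c)

feature_map = torch.randn(1, 8, 8, 3)
windows = _window_partition(feature_map, window_size=4)   # -> (4, 4, 4, 3): four 4x4 windows
restored = _window_reverse(windows, 4, 8, 8)              # -> (1, 8, 8, 3)
assert torch.equal(feature_map, restored)                 # partition/reverse is an exact round trip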
self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)) coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing='ij')) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.register_buffer('relative_position_index', relative_position_index) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: (batch_size, dim, num_channels) = hidden_states.shape mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] relative_position_bias = relative_position_bias.view(self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view(batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim) attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class DonutSwinSelfOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states 
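# Hedged end-to-end sketch: running a randomly initialised Donut Swin encoder on a dummy
# 224x224 image with the default DonutSwinConfig. This assumes `transformers` and `torch`
# are installed and downloads no checkpoint; the output shape matches
# _EXPECTED_OUTPUT_SHAPE = [1, 49, 768] declared near the top of this file.
import torch
from transformers import DonutSwinConfig, DonutSwinModel

config = DonutSwinConfig()                  # image_size=224, patch_size=4, embed_dim=96, depths=[2, 2, 6, 2]
model = DonutSwinModel(config).eval()       # random weights; no checkpoint download
dummy_pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(dummy_pixel_values)
# 224 / 4 = 56 patches per side, halved by each of the 3 patch-merging stages -> 7 * 7 = 49 tokens;
# final hidden size is embed_dim * 2**3 = 768.
print(outputs.last_hidden_state.shape)      # torch.Size([1, 49, 768])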
class DonutSwinAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() self.self = DonutSwinSelfAttention(config, dim, num_heads, window_size) self.output = DonutSwinSelfOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class DonutSwinIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class DonutSwinOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class DonutSwinLayer(nn.Module): def __init__(self, config, dim, input_resolution, num_heads, shift_size=0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.shift_size = shift_size self.window_size = config.window_size self.input_resolution = input_resolution self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = DonutSwinAttention(config, dim, num_heads, window_size=self.window_size) self.drop_path = DonutSwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = DonutSwinIntermediate(config, dim) self.output = DonutSwinOutput(config, dim) def set_shift_and_window_size(self, input_resolution): if min(input_resolution) <= self.window_size: self.shift_size = torch_int(0) self.window_size = torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution) def get_attn_mask(self, height, width, dtype, device): if self.shift_size > 0: img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device) height_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) width_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) 
count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, 0, pad_right, 0, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return (hidden_states, pad_values) def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> Tuple[torch.Tensor, torch.Tensor]: if not always_partition: self.set_shift_and_window_size(input_dimensions) else: pass (height, width) = input_dimensions (batch_size, _, channels) = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) hidden_states = hidden_states.view(batch_size, height, width, channels) (hidden_states, pad_values) = self.maybe_pad(hidden_states, height, width) (_, height_pad, width_pad, _) = hidden_states.shape if self.shift_size > 0: shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device) attention_outputs = self.attention(hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions) attention_output = attention_outputs[0] attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) if self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = shortcut + self.drop_path(attention_windows) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.output(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs class DonutSwinStage(nn.Module): def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): super().__init__() self.config = config self.dim = dim self.blocks = nn.ModuleList([DonutSwinLayer(config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, shift_size=0 if i % 2 == 0 else config.window_size // 2) for i in range(depth)]) if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, 
norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> Tuple[torch.Tensor]: (height, width) = input_dimensions for (i, layer_module) in enumerate(self.blocks): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: (height_downsampled, width_downsampled) = ((height + 1) // 2, (width + 1) // 2) output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs class DonutSwinEncoder(nn.Module): def __init__(self, config, grid_size): super().__init__() self.num_layers = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] self.layers = nn.ModuleList([DonutSwinStage(config=config, dim=int(config.embed_dim * 2 ** i_layer), input_resolution=(grid_size[0] // 2 ** i_layer, grid_size[1] // 2 ** i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]):sum(config.depths[:i_layer + 1])], downsample=DonutSwinPatchMerging if i_layer < self.num_layers - 1 else None) for i_layer in range(self.num_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, always_partition: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple, DonutSwinEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: (batch_size, _, hidden_size) = hidden_states.shape reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for (i, layer_module) in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition) else: layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] output_dimensions = layer_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: (batch_size, _, hidden_size) = hidden_states_before_downsampling.shape 
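# Reshape the pre-downsampling hidden state from (batch, height * width, hidden_size) to
# (batch, hidden_size, height, width) so it is exposed as a spatial feature map in `reshaped_hidden_states`.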
reshaped_hidden_state = hidden_states_before_downsampling.view(batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and (not output_hidden_states_before_downsampling): (batch_size, _, hidden_size) = hidden_states.shape reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[3:] if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return DonutSwinEncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states) class DonutSwinPreTrainedModel(PreTrainedModel): config_class = DonutSwinConfig base_model_prefix = 'swin' main_input_name = 'pixel_values' supports_gradient_checkpointing = True _no_split_modules = ['DonutSwinStage'] def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) SWIN_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`DonutSwinConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' SWIN_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`DonutImageProcessor.__call__`] for details.\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n Whether to interpolate the pre-trained position encodings.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.', SWIN_START_DOCSTRING) class DonutSwinModel(DonutSwinPreTrainedModel): def __init__(self, config, add_pooling_layer=True, use_mask_token=False): super().__init__(config) self.config = config self.num_layers = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) self.embeddings = DonutSwinEmbeddings(config, use_mask_token=use_mask_token) self.encoder = DonutSwinEncoder(config, self.embeddings.patch_grid) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=DonutSwinModelOutput, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[Tuple, DonutSwinModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') head_mask = self.get_head_mask(head_mask, len(self.config.depths)) (embedding_output, input_dimensions) = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding) encoder_outputs = self.encoder(embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return DonutSwinModelOutput(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states) # File: transformers-main/src/transformers/models/donut/processing_donut.py """""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class DonutProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] image_processor_class = 'AutoImageProcessor' tokenizer_class = 'AutoTokenizer' def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor 
= None if 'feature_extractor' in kwargs: warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning) feature_extractor = kwargs.pop('feature_extractor') image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor self._in_target_context_manager = False def __call__(self, *args, **kwargs): if self._in_target_context_manager: return self.current_processor(*args, **kwargs) images = kwargs.pop('images', None) text = kwargs.pop('text', None) if len(args) > 0: images = args[0] args = args[1:] if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.') if images is not None: inputs = self.image_processor(images, *args, **kwargs) if text is not None: encodings = self.tokenizer(text, **kwargs) if text is None: return inputs elif images is None: return encodings else: inputs['labels'] = encodings['input_ids'] return inputs def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @contextmanager def as_target_processor(self): warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your images inputs, or in a separate call.') self._in_target_context_manager = True self.current_processor = self.tokenizer yield self.current_processor = self.image_processor self._in_target_context_manager = False def token2json(self, tokens, is_inner_value=False, added_vocab=None): if added_vocab is None: added_vocab = self.tokenizer.get_added_vocab() output = {} while tokens: start_token = re.search('<s_(.*?)>', tokens, re.IGNORECASE) if start_token is None: break key = start_token.group(1) key_escaped = re.escape(key) end_token = re.search(f'</s_{key_escaped}>', tokens, re.IGNORECASE) start_token = start_token.group() if end_token is None: tokens = tokens.replace(start_token, '') else: end_token = end_token.group() start_token_escaped = re.escape(start_token) end_token_escaped = re.escape(end_token) content = re.search(f'{start_token_escaped}(.*?){end_token_escaped}', tokens, re.IGNORECASE | re.DOTALL) if content is not None: content = content.group(1).strip() if '<s_' in content and '</s_' in content: value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab) if value: if len(value) == 1: value = value[0] output[key] = value else: output[key] = [] for leaf in content.split('<sep/>'): leaf = leaf.strip() if leaf in added_vocab and leaf[0] == '<' and (leaf[-2:] == '/>'): leaf = leaf[1:-2] output[key].append(leaf) if len(output[key]) == 1: output[key] = output[key][0] tokens = tokens[tokens.find(end_token) + len(end_token):].strip() if tokens[:6] == '<sep/>': return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab) if len(output): return [output] if is_inner_value else output else: return [] if is_inner_value else {'text_sequence': tokens} @property def feature_extractor_class(self): warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning) return self.image_processor_class @property def feature_extractor(self): warnings.warn('`feature_extractor` is deprecated and will be removed in v5.
Use `image_processor` instead.', FutureWarning) return self.image_processor # File: transformers-main/src/transformers/models/dpr/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_dpr': ['DPRConfig'], 'tokenization_dpr': ['DPRContextEncoderTokenizer', 'DPRQuestionEncoderTokenizer', 'DPRReaderOutput', 'DPRReaderTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_dpr_fast'] = ['DPRContextEncoderTokenizerFast', 'DPRQuestionEncoderTokenizerFast', 'DPRReaderTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_dpr'] = ['DPRContextEncoder', 'DPRPretrainedContextEncoder', 'DPRPreTrainedModel', 'DPRPretrainedQuestionEncoder', 'DPRPretrainedReader', 'DPRQuestionEncoder', 'DPRReader'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_dpr'] = ['TFDPRContextEncoder', 'TFDPRPretrainedContextEncoder', 'TFDPRPretrainedQuestionEncoder', 'TFDPRPretrainedReader', 'TFDPRQuestionEncoder', 'TFDPRReader'] if TYPE_CHECKING: from .configuration_dpr import DPRConfig from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderOutput, DPRReaderTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_dpr_fast import DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizerFast, DPRReaderTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpr import DPRContextEncoder, DPRPretrainedContextEncoder, DPRPreTrainedModel, DPRPretrainedQuestionEncoder, DPRPretrainedReader, DPRQuestionEncoder, DPRReader try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_dpr import TFDPRContextEncoder, TFDPRPretrainedContextEncoder, TFDPRPretrainedQuestionEncoder, TFDPRPretrainedReader, TFDPRQuestionEncoder, TFDPRReader else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/dpr/configuration_dpr.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class DPRConfig(PretrainedConfig): model_type = 'dpr' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', projection_dim: int=0, **kwargs): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = 
attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.projection_dim = projection_dim self.position_embedding_type = position_embedding_type # File: transformers-main/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py import argparse import collections from pathlib import Path import torch from torch.serialization import default_restore_location from transformers import BertConfig, DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader CheckpointState = collections.namedtuple('CheckpointState', ['model_dict', 'optimizer_dict', 'scheduler_dict', 'offset', 'epoch', 'encoder_params']) def load_states_from_checkpoint(model_file: str) -> CheckpointState: print(f'Reading saved model from {model_file}') state_dict = torch.load(model_file, map_location=lambda s, l: default_restore_location(s, 'cpu')) return CheckpointState(**state_dict) class DPRState: def __init__(self, src_file: Path): self.src_file = src_file def load_dpr_model(self): raise NotImplementedError @staticmethod def from_type(comp_type: str, *args, **kwargs) -> 'DPRState': if comp_type.startswith('c'): return DPRContextEncoderState(*args, **kwargs) if comp_type.startswith('q'): return DPRQuestionEncoderState(*args, **kwargs) if comp_type.startswith('r'): return DPRReaderState(*args, **kwargs) else: raise ValueError("Component type must be either 'ctx_encoder', 'question_encoder' or 'reader'.") class DPRContextEncoderState(DPRState): def load_dpr_model(self): model = DPRContextEncoder(DPRConfig(**BertConfig.get_config_dict('google-bert/bert-base-uncased')[0])) print(f'Loading DPR biencoder from {self.src_file}') saved_state = load_states_from_checkpoint(self.src_file) (encoder, prefix) = (model.ctx_encoder, 'ctx_model.') state_dict = {'bert_model.embeddings.position_ids': model.ctx_encoder.bert_model.embeddings.position_ids} for (key, value) in saved_state.model_dict.items(): if key.startswith(prefix): key = key[len(prefix):] if not key.startswith('encode_proj.'): key = 'bert_model.' + key state_dict[key] = value encoder.load_state_dict(state_dict) return model class DPRQuestionEncoderState(DPRState): def load_dpr_model(self): model = DPRQuestionEncoder(DPRConfig(**BertConfig.get_config_dict('google-bert/bert-base-uncased')[0])) print(f'Loading DPR biencoder from {self.src_file}') saved_state = load_states_from_checkpoint(self.src_file) (encoder, prefix) = (model.question_encoder, 'question_model.') state_dict = {'bert_model.embeddings.position_ids': model.question_encoder.bert_model.embeddings.position_ids} for (key, value) in saved_state.model_dict.items(): if key.startswith(prefix): key = key[len(prefix):] if not key.startswith('encode_proj.'): key = 'bert_model.' + key state_dict[key] = value encoder.load_state_dict(state_dict) return model class DPRReaderState(DPRState): def load_dpr_model(self): model = DPRReader(DPRConfig(**BertConfig.get_config_dict('google-bert/bert-base-uncased')[0])) print(f'Loading DPR reader from {self.src_file}') saved_state = load_states_from_checkpoint(self.src_file) state_dict = {'encoder.bert_model.embeddings.position_ids': model.span_predictor.encoder.bert_model.embeddings.position_ids} for (key, value) in saved_state.model_dict.items(): if key.startswith('encoder.') and (not key.startswith('encoder.encode_proj')): key = 'encoder.bert_model.' 
+ key[len('encoder.'):] state_dict[key] = value model.span_predictor.load_state_dict(state_dict) return model def convert(comp_type: str, src_file: Path, dest_dir: Path): dest_dir = Path(dest_dir) dest_dir.mkdir(exist_ok=True) dpr_state = DPRState.from_type(comp_type, src_file=src_file) model = dpr_state.load_dpr_model() model.save_pretrained(dest_dir) model.from_pretrained(dest_dir) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--type', type=str, help="Type of the component to convert: 'ctx_encoder', 'question_encoder' or 'reader'.") parser.add_argument('--src', type=str, help="Path to the dpr checkpoint file. They can be downloaded from the official DPR repo https://github.com/facebookresearch/DPR. Note that in the official repo, both encoders are stored in the 'retriever' checkpoints.") parser.add_argument('--dest', type=str, default=None, help='Path to the output PyTorch model directory.') args = parser.parse_args() src_file = Path(args.src) dest_dir = f'converted-{src_file.name}' if args.dest is None else args.dest dest_dir = Path(dest_dir) assert src_file.exists() assert args.type is not None, "Please specify the component type of the DPR model to convert: 'ctx_encoder', 'question_encoder' or 'reader'." convert(args.type, src_file, dest_dir) # File: transformers-main/src/transformers/models/dpr/modeling_dpr.py """""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from torch import Tensor, nn from ...modeling_outputs import BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ..bert.modeling_bert import BertModel from .configuration_dpr import DPRConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'DPRConfig' _CHECKPOINT_FOR_DOC = 'facebook/dpr-ctx_encoder-single-nq-base' @dataclass class DPRContextEncoderOutput(ModelOutput): pooler_output: torch.FloatTensor hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class DPRQuestionEncoderOutput(ModelOutput): pooler_output: torch.FloatTensor hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class DPRReaderOutput(ModelOutput): start_logits: torch.FloatTensor end_logits: torch.FloatTensor = None relevance_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None class DPRPreTrainedModel(PreTrainedModel): _supports_sdpa = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class DPREncoder(DPRPreTrainedModel): base_model_prefix = 'bert_model' def __init__(self, config: DPRConfig): super().__init__(config) self.bert_model = BertModel(config, add_pooling_layer=False) if self.bert_model.config.hidden_size <= 0: raise ValueError("Encoder hidden_size can't be zero") self.projection_dim = config.projection_dim if self.projection_dim 
> 0: self.encode_proj = nn.Linear(self.bert_model.config.hidden_size, config.projection_dim) self.post_init() def forward(self, input_ids: Tensor, attention_mask: Optional[Tensor]=None, token_type_ids: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False) -> Union[BaseModelOutputWithPooling, Tuple[Tensor, ...]]: outputs = self.bert_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] pooled_output = sequence_output[:, 0, :] if self.projection_dim > 0: pooled_output = self.encode_proj(pooled_output) if not return_dict: return (sequence_output, pooled_output) + outputs[2:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @property def embeddings_size(self) -> int: if self.projection_dim > 0: return self.encode_proj.out_features return self.bert_model.config.hidden_size class DPRSpanPredictor(DPRPreTrainedModel): base_model_prefix = 'encoder' def __init__(self, config: DPRConfig): super().__init__(config) self.encoder = DPREncoder(config) self.qa_outputs = nn.Linear(self.encoder.embeddings_size, 2) self.qa_classifier = nn.Linear(self.encoder.embeddings_size, 1) self.post_init() def forward(self, input_ids: Tensor, attention_mask: Tensor, inputs_embeds: Optional[Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False) -> Union[DPRReaderOutput, Tuple[Tensor, ...]]: (n_passages, sequence_length) = input_ids.size() if input_ids is not None else inputs_embeds.size()[:2] outputs = self.encoder(input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() relevance_logits = self.qa_classifier(sequence_output[:, 0, :]) start_logits = start_logits.view(n_passages, sequence_length) end_logits = end_logits.view(n_passages, sequence_length) relevance_logits = relevance_logits.view(n_passages) if not return_dict: return (start_logits, end_logits, relevance_logits) + outputs[2:] return DPRReaderOutput(start_logits=start_logits, end_logits=end_logits, relevance_logits=relevance_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class DPRPretrainedContextEncoder(DPRPreTrainedModel): config_class = DPRConfig load_tf_weights = None base_model_prefix = 'ctx_encoder' class DPRPretrainedQuestionEncoder(DPRPreTrainedModel): config_class = DPRConfig load_tf_weights = None base_model_prefix = 'question_encoder' class DPRPretrainedReader(DPRPreTrainedModel): config_class = DPRConfig load_tf_weights = None base_model_prefix = 'span_predictor' DPR_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`DPRConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' DPR_ENCODERS_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be\n formatted with [CLS] and [SEP] tokens as follows:\n\n (a) For sequence pairs (for a pair title+text for example):\n\n ```\n tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n ```\n\n (b) For single sequences (for a question for example):\n\n ```\n tokens: [CLS] the dog is hairy . [SEP]\n token_type_ids: 0 0 0 0 0 0 0\n ```\n\n DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right\n rather than the left.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" DPR_READER_INPUTS_DOCSTRING = "\n Args:\n input_ids (`Tuple[torch.LongTensor]` of shapes `(n_passages, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. 
It has to be a sequence triplet with 1) the question\n and 2) the passages titles and 3) the passages texts To match pretraining, DPR `input_ids` sequence should\n be formatted with [CLS] and [SEP] with the format:\n\n `[CLS] [SEP] [SEP] `\n\n DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right\n rather than the left.\n\n Indices can be obtained using [`DPRReaderTokenizer`]. See this class documentation for more details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `(n_passages, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n inputs_embeds (`torch.FloatTensor` of shape `(n_passages, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare DPRContextEncoder transformer outputting pooler outputs as context representations.', DPR_START_DOCSTRING) class DPRContextEncoder(DPRPretrainedContextEncoder): def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.ctx_encoder = DPREncoder(config) self.post_init() @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, token_type_ids: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[DPRContextEncoderOutput, Tuple[Tensor, ...]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if input_ids is None else input_ids != self.config.pad_token_id if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) outputs = 
self.ctx_encoder(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return outputs[1:] return DPRContextEncoderOutput(pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.', DPR_START_DOCSTRING) class DPRQuestionEncoder(DPRPretrainedQuestionEncoder): def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.question_encoder = DPREncoder(config) self.post_init() @add_start_docstrings_to_model_forward(DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, token_type_ids: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[DPRQuestionEncoderOutput, Tuple[Tensor, ...]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if input_ids is None else input_ids != self.config.pad_token_id if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) outputs = self.question_encoder(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return outputs[1:] return DPRQuestionEncoderOutput(pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('The bare DPRReader transformer outputting span predictions.', DPR_START_DOCSTRING) class DPRReader(DPRPretrainedReader): def __init__(self, config: DPRConfig): super().__init__(config) self.config = config self.span_predictor = DPRSpanPredictor(config) self.post_init() @add_start_docstrings_to_model_forward(DPR_READER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DPRReaderOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[DPRReaderOutput, Tuple[Tensor, ...]]: output_attentions = output_attentions if output_attentions is not None 
else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) return self.span_predictor(input_ids, attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) # File: transformers-main/src/transformers/models/dpr/modeling_tf_dpr.py """""" from __future__ import annotations from dataclasses import dataclass from typing import Tuple, Union import tensorflow as tf from ...modeling_tf_outputs import TFBaseModelOutputWithPooling from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, get_initializer, keras, shape_list, unpack_inputs from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ..bert.modeling_tf_bert import TFBertMainLayer from .configuration_dpr import DPRConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'DPRConfig' @dataclass class TFDPRContextEncoderOutput(ModelOutput): pooler_output: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFDPRQuestionEncoderOutput(ModelOutput): pooler_output: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] | None = None @dataclass class TFDPRReaderOutput(ModelOutput): start_logits: tf.Tensor = None end_logits: tf.Tensor = None relevance_logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor, ...] | None = None attentions: Tuple[tf.Tensor, ...] 
| None = None class TFDPREncoderLayer(keras.layers.Layer): base_model_prefix = 'bert_model' def __init__(self, config: DPRConfig, **kwargs): super().__init__(**kwargs) self.bert_model = TFBertMainLayer(config, add_pooling_layer=False, name='bert_model') self.config = config if self.config.hidden_size <= 0: raise ValueError("Encoder hidden_size can't be zero") self.projection_dim = config.projection_dim if self.projection_dim > 0: self.encode_proj = keras.layers.Dense(config.projection_dim, kernel_initializer=get_initializer(config.initializer_range), name='encode_proj') @unpack_inputs def call(self, input_ids: tf.Tensor=None, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: bool=None, output_hidden_states: bool=None, return_dict: bool=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor, ...]]: outputs = self.bert_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] pooled_output = sequence_output[:, 0, :] if self.projection_dim > 0: pooled_output = self.encode_proj(pooled_output) if not return_dict: return (sequence_output, pooled_output) + outputs[1:] return TFBaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @property def embeddings_size(self) -> int: if self.projection_dim > 0: return self.projection_dim return self.bert_model.config.hidden_size def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'bert_model', None) is not None: with tf.name_scope(self.bert_model.name): self.bert_model.build(None) if getattr(self, 'encode_proj', None) is not None: with tf.name_scope(self.encode_proj.name): self.encode_proj.build(None) class TFDPRSpanPredictorLayer(keras.layers.Layer): base_model_prefix = 'encoder' def __init__(self, config: DPRConfig, **kwargs): super().__init__(**kwargs) self.config = config self.encoder = TFDPREncoderLayer(config, name='encoder') self.qa_outputs = keras.layers.Dense(2, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs') self.qa_classifier = keras.layers.Dense(1, kernel_initializer=get_initializer(config.initializer_range), name='qa_classifier') @unpack_inputs def call(self, input_ids: tf.Tensor=None, attention_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False, training: bool=False) -> Union[TFDPRReaderOutput, Tuple[tf.Tensor, ...]]: (n_passages, sequence_length) = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)[:2] outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) relevance_logits = self.qa_classifier(sequence_output[:, 0, :]) start_logits = tf.reshape(start_logits, [n_passages, sequence_length]) end_logits = tf.reshape(end_logits, 
[n_passages, sequence_length]) relevance_logits = tf.reshape(relevance_logits, [n_passages]) if not return_dict: return (start_logits, end_logits, relevance_logits) + outputs[2:] return TFDPRReaderOutput(start_logits=start_logits, end_logits=end_logits, relevance_logits=relevance_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'qa_outputs', None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.encoder.embeddings_size]) if getattr(self, 'qa_classifier', None) is not None: with tf.name_scope(self.qa_classifier.name): self.qa_classifier.build([None, None, self.encoder.embeddings_size]) class TFDPRSpanPredictor(TFPreTrainedModel): base_model_prefix = 'encoder' def __init__(self, config: DPRConfig, **kwargs): super().__init__(config, **kwargs) self.encoder = TFDPRSpanPredictorLayer(config) @unpack_inputs def call(self, input_ids: tf.Tensor=None, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False, training: bool=False) -> Union[TFDPRReaderOutput, Tuple[tf.Tensor, ...]]: outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs class TFDPREncoder(TFPreTrainedModel): base_model_prefix = 'encoder' def __init__(self, config: DPRConfig, **kwargs): super().__init__(config, **kwargs) self.encoder = TFDPREncoderLayer(config) @unpack_inputs def call(self, input_ids: tf.Tensor=None, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False, training: bool=False) -> Union[TFDPRReaderOutput, Tuple[tf.Tensor, ...]]: outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs class TFDPRPretrainedContextEncoder(TFPreTrainedModel): config_class = DPRConfig base_model_prefix = 'ctx_encoder' class TFDPRPretrainedQuestionEncoder(TFPreTrainedModel): config_class = DPRConfig base_model_prefix = 'question_encoder' class TFDPRPretrainedReader(TFPreTrainedModel): config_class = DPRConfig base_model_prefix = 'reader' TF_DPR_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a Tensorflow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)\n subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to\n general usage and behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Parameters:\n config ([`DPRConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' TF_DPR_ENCODERS_INPUTS_DOCSTRING = "\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be\n formatted with [CLS] and [SEP] tokens as follows:\n\n (a) For sequence pairs (for a pair title+text for example):\n\n ```\n tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n ```\n\n (b) For single sequences (for a question for example):\n\n ```\n tokens: [CLS] the dog is hairy . [SEP]\n token_type_ids: 0 0 0 0 0 0 0\n ```\n\n DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right\n rather than the left.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n inputs_embeds (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" TF_DPR_READER_INPUTS_DOCSTRING = "\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shapes `(n_passages, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. It has to be a sequence triplet with 1) the question\n and 2) the passages titles and 3) the passages texts To match pretraining, DPR `input_ids` sequence should\n be formatted with [CLS] and [SEP] with the format:\n\n `[CLS] [SEP] [SEP] `\n\n DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right\n rather than the left.\n\n Indices can be obtained using [`DPRReaderTokenizer`]. See this class documentation for more details.\n attention_mask (`Numpy array` or `tf.Tensor` of shape `(n_passages, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n inputs_embeds (`Numpy array` or `tf.Tensor` of shape `(n_passages, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @add_start_docstrings('The bare DPRContextEncoder transformer outputting pooler outputs as context representations.', TF_DPR_START_DOCSTRING) class TFDPRContextEncoder(TFDPRPretrainedContextEncoder): def __init__(self, config: DPRConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.ctx_encoder = TFDPREncoderLayer(config, name='ctx_encoder') def get_input_embeddings(self): try: return self.ctx_encoder.bert_model.get_input_embeddings() except AttributeError: self.build() return self.ctx_encoder.bert_model.get_input_embeddings() @unpack_inputs @add_start_docstrings_to_model_forward(TF_DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFDPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: bool | None=None, output_hidden_states: bool | None=None, return_dict: bool | None=None, training: bool=False) -> TFDPRContextEncoderOutput | Tuple[tf.Tensor, ...]: if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if attention_mask is None: attention_mask = tf.ones(input_shape, dtype=tf.dtypes.int32) if input_ids is None else input_ids != self.config.pad_token_id if token_type_ids is None: token_type_ids = tf.zeros(input_shape, dtype=tf.dtypes.int32) outputs = self.ctx_encoder(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) if not return_dict: return outputs[1:] return TFDPRContextEncoderOutput(pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'ctx_encoder', None) is not None: with tf.name_scope(self.ctx_encoder.name): self.ctx_encoder.build(None) @add_start_docstrings('The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.', TF_DPR_START_DOCSTRING) class TFDPRQuestionEncoder(TFDPRPretrainedQuestionEncoder): def __init__(self, config: DPRConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.question_encoder = TFDPREncoderLayer(config, name='question_encoder') def get_input_embeddings(self): try: return self.question_encoder.bert_model.get_input_embeddings() except AttributeError: self.build() return self.question_encoder.bert_model.get_input_embeddings() @unpack_inputs @add_start_docstrings_to_model_forward(TF_DPR_ENCODERS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFDPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, inputs_embeds: 
tf.Tensor | None=None, output_attentions: bool | None=None, output_hidden_states: bool | None=None, return_dict: bool | None=None, training: bool=False) -> TFDPRQuestionEncoderOutput | Tuple[tf.Tensor, ...]: if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if attention_mask is None: attention_mask = tf.ones(input_shape, dtype=tf.dtypes.int32) if input_ids is None else input_ids != self.config.pad_token_id if token_type_ids is None: token_type_ids = tf.zeros(input_shape, dtype=tf.dtypes.int32) outputs = self.question_encoder(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) if not return_dict: return outputs[1:] return TFDPRQuestionEncoderOutput(pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'question_encoder', None) is not None: with tf.name_scope(self.question_encoder.name): self.question_encoder.build(None) @add_start_docstrings('The bare DPRReader transformer outputting span predictions.', TF_DPR_START_DOCSTRING) class TFDPRReader(TFDPRPretrainedReader): def __init__(self, config: DPRConfig, *args, **kwargs): super().__init__(config, *args, **kwargs) self.span_predictor = TFDPRSpanPredictorLayer(config, name='span_predictor') def get_input_embeddings(self): try: return self.span_predictor.encoder.bert_model.get_input_embeddings() except AttributeError: self.build() return self.span_predictor.encoder.bert_model.get_input_embeddings() @unpack_inputs @add_start_docstrings_to_model_forward(TF_DPR_READER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFDPRReaderOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: bool | None=None, output_hidden_states: bool | None=None, return_dict: bool | None=None, training: bool=False) -> TFDPRReaderOutput | Tuple[tf.Tensor, ...]: if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if attention_mask is None: attention_mask = tf.ones(input_shape, dtype=tf.dtypes.int32) return self.span_predictor(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'span_predictor', None) is not None: with tf.name_scope(self.span_predictor.name): self.span_predictor.build(None) # File: transformers-main/src/transformers/models/dpr/tokenization_dpr.py """""" import collections from typing import List, Optional, Union from 
...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} class DPRContextEncoderTokenizer(BertTokenizer): vocab_files_names = VOCAB_FILES_NAMES class DPRQuestionEncoderTokenizer(BertTokenizer): vocab_files_names = VOCAB_FILES_NAMES DPRSpanPrediction = collections.namedtuple('DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']) DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) CUSTOM_DPR_READER_DOCSTRING = "\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n is provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING) class CustomDPRReaderTokenizerMixin: def __call__(self, questions, titles: Optional[str]=None, texts: Optional[str]=None, padding: Union[bool, str]=False, truncation: Union[bool, str]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_attention_mask: Optional[bool]=None, **kwargs) -> BatchEncoding: if titles is None and texts is None: return super().__call__(questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs) elif titles is None or texts is None: text_pair = titles if texts is None else texts return super().__call__(questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs) titles = titles if not isinstance(titles, str) else [titles] texts = texts if not isinstance(texts, str) else [texts] n_passages = len(titles) questions = questions if not isinstance(questions, str) else [questions] * n_passages if len(titles) != len(texts): raise ValueError(f'There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.') encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids'] encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids'] encoded_inputs = {'input_ids': [(encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for (encoded_question_and_title, encoded_text) in zip(encoded_question_and_titles, encoded_texts)]} if return_attention_mask is not False: attention_mask = [] for input_ids in encoded_inputs['input_ids']: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) encoded_inputs['attention_mask'] = attention_mask return 
self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors) def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int=16, max_answer_length: int=64, num_spans_per_passage: int=4) -> List[DPRSpanPrediction]: input_ids = reader_input['input_ids'] (start_logits, end_logits, relevance_logits) = reader_output[:3] n_passages = len(relevance_logits) sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__) nbest_spans_predictions: List[DPRReaderOutput] = [] for doc_id in sorted_docs: sequence_ids = list(input_ids[doc_id]) passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1 if sequence_ids[-1] == self.pad_token_id: sequence_len = sequence_ids.index(self.pad_token_id) else: sequence_len = len(sequence_ids) best_spans = self._get_best_spans(start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage) for (start_index, end_index) in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append(DPRSpanPrediction(span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index:end_index + 1]))) if len(nbest_spans_predictions) >= num_spans: break return nbest_spans_predictions[:num_spans] def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]: scores = [] for (start_index, start_score) in enumerate(start_logits): for (answer_length, end_score) in enumerate(end_logits[start_index:start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) scores = sorted(scores, key=lambda x: x[1], reverse=True) chosen_span_intervals = [] for ((start_index, end_index), score) in scores: if start_index > end_index: raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]') length = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f'Span is too long: {length} > {max_answer_length}') if any((start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals)): continue chosen_span_intervals.append((start_index, end_index)) if len(chosen_span_intervals) == top_spans: break return chosen_span_intervals @add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING) class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] # File: transformers-main/src/transformers/models/dpr/tokenization_dpr_fast.py """""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} class DPRContextEncoderTokenizerFast(BertTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES 
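# Minimal usage sketch tying together the `DPRReaderTokenizer.__call__` and `decode_best_spans` defined above.
# It assumes the PyTorch `DPRReader` model and the public 'facebook/dpr-reader-single-nq-base' checkpoint;
# the question/passage strings are illustrative only, not taken from the original file.
from transformers import DPRReader, DPRReaderTokenizer

reader_tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base')
reader_model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
# One question paired with two passages: the resulting input_ids has shape (n_passages, sequence_length).
encoded = reader_tokenizer(
    questions='Who wrote Hamlet?',
    titles=['Hamlet', 'William Shakespeare'],
    texts=[
        'Hamlet is a tragedy written by William Shakespeare.',
        'Shakespeare was an English playwright and poet.',
    ],
    padding=True,
    return_tensors='pt',
)
outputs = reader_model(**encoded)
# Rank answer spans across passages using the start/end/relevance logits, as implemented above.
best_spans = reader_tokenizer.decode_best_spans(encoded, outputs, num_spans=3, max_answer_length=16)
for span in best_spans:
    print(span.doc_id, span.text)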
slow_tokenizer_class = DPRContextEncoderTokenizer class DPRQuestionEncoderTokenizerFast(BertTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = DPRQuestionEncoderTokenizer DPRSpanPrediction = collections.namedtuple('DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']) DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) CUSTOM_DPR_READER_DOCSTRING = "\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n is provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING) class CustomDPRReaderTokenizerMixin: def __call__(self, questions, titles: Optional[str]=None, texts: Optional[str]=None, padding: Union[bool, str]=False, truncation: Union[bool, str]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_attention_mask: Optional[bool]=None, **kwargs) -> BatchEncoding: if titles is None and texts is None: return super().__call__(questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs) elif titles is None or texts is None: text_pair = titles if texts is None else texts return super().__call__(questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs) titles = titles if not isinstance(titles, str) else [titles] texts = texts if not isinstance(texts, str) else [texts] n_passages = len(titles) questions = questions if not isinstance(questions, str) else [questions] * n_passages assert len(titles) == len(texts), f'There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.' 
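# The remainder of __call__ below mirrors the slow tokenizer: each question is encoded as a pair with its
# passage title (with special tokens), the passage text is encoded without special tokens, the two id lists
# are concatenated per passage (optionally truncated to max_length), and an attention mask is built from
# pad_token_id before the batch is padded.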
encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids'] encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids'] encoded_inputs = {'input_ids': [(encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for (encoded_question_and_title, encoded_text) in zip(encoded_question_and_titles, encoded_texts)]} if return_attention_mask is not False: attention_mask = [] for input_ids in encoded_inputs['input_ids']: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) encoded_inputs['attention_mask'] = attention_mask return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors) def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int=16, max_answer_length: int=64, num_spans_per_passage: int=4) -> List[DPRSpanPrediction]: input_ids = reader_input['input_ids'] (start_logits, end_logits, relevance_logits) = reader_output[:3] n_passages = len(relevance_logits) sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__) nbest_spans_predictions: List[DPRReaderOutput] = [] for doc_id in sorted_docs: sequence_ids = list(input_ids[doc_id]) passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1 if sequence_ids[-1] == self.pad_token_id: sequence_len = sequence_ids.index(self.pad_token_id) else: sequence_len = len(sequence_ids) best_spans = self._get_best_spans(start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage) for (start_index, end_index) in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append(DPRSpanPrediction(span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index:end_index + 1]))) if len(nbest_spans_predictions) >= num_spans: break return nbest_spans_predictions[:num_spans] def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]: scores = [] for (start_index, start_score) in enumerate(start_logits): for (answer_length, end_score) in enumerate(end_logits[start_index:start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) scores = sorted(scores, key=lambda x: x[1], reverse=True) chosen_span_intervals = [] for ((start_index, end_index), score) in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' length = end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any((start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals)): continue chosen_span_intervals.append((start_index, end_index)) if len(chosen_span_intervals) == top_spans: break return chosen_span_intervals @add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING) class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES 
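# The class attributes that follow restrict DPRReaderTokenizerFast to feeding only input_ids and
# attention_mask to the model (no token_type_ids) and link it back to the slow DPRReaderTokenizer above.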
model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = DPRReaderTokenizer # File: transformers-main/src/transformers/models/dpt/__init__.py from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable _import_structure = {'configuration_dpt': ['DPTConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor'] _import_structure['image_processing_dpt'] = ['DPTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_dpt'] = ['DPTForDepthEstimation', 'DPTForSemanticSegmentation', 'DPTModel', 'DPTPreTrainedModel'] if TYPE_CHECKING: from .configuration_dpt import DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/dpt/configuration_dpt.py """""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto.configuration_auto import CONFIG_MAPPING from ..bit import BitConfig logger = logging.get_logger(__name__) class DPTConfig(PretrainedConfig): model_type = 'dpt' def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11], readout_type='project', reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_bias_in_fusion_residual=None, add_projection=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 1024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.is_hybrid = is_hybrid use_autobackbone = False if self.is_hybrid: if backbone_config is None: backbone_config = {'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True} if isinstance(backbone_config, dict): logger.info('Initializing the config with a `BiT` backbone.') backbone_config = BitConfig(**backbone_config) elif isinstance(backbone_config, PretrainedConfig): backbone_config = backbone_config else: raise ValueError(f'backbone_config must be a dictionary or a `PretrainedConfig`, got 
{backbone_config.__class__}.') self.backbone_config = backbone_config self.backbone_featmap_shape = backbone_featmap_shape self.neck_ignore_stages = neck_ignore_stages if readout_type != 'project': raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.") elif backbone is not None or backbone_config is not None: use_autobackbone = True if isinstance(backbone_config, dict): backbone_model_type = backbone_config.get('model_type') config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) self.backbone_config = backbone_config self.backbone_featmap_shape = None self.neck_ignore_stages = [] verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs) else: self.backbone_config = None self.backbone_featmap_shape = None self.neck_ignore_stages = [] self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.use_autobackbone = use_autobackbone self.backbone_out_indices = None if use_autobackbone else backbone_out_indices if readout_type not in ['ignore', 'add', 'project']: raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']") self.hidden_act = hidden_act self.initializer_range = initializer_range self.readout_type = readout_type self.reassemble_factors = reassemble_factors self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size self.head_in_index = head_in_index self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual self.use_bias_in_fusion_residual = use_bias_in_fusion_residual self.add_projection = add_projection self.use_auxiliary_head = use_auxiliary_head self.auxiliary_loss_weight = auxiliary_loss_weight self.semantic_loss_ignore_index = semantic_loss_ignore_index self.semantic_classifier_dropout = semantic_classifier_dropout def to_dict(self): output = copy.deepcopy(self.__dict__) if output['backbone_config'] is not None: output['backbone_config'] = self.backbone_config.to_dict() output['model_type'] = self.__class__.model_type return output # File: transformers-main/src/transformers/models/dpt/convert_dinov2_depth_to_hf.py """""" import argparse import itertools import math from pathlib import Path import requests import torch from PIL import Image from torchvision import transforms from transformers import Dinov2Config, DPTConfig, DPTForDepthEstimation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dpt_config(model_name): if 'small' in model_name: backbone_config = Dinov2Config.from_pretrained('facebook/dinov2-small', out_indices=[3, 6, 9, 12], apply_layernorm=False, reshape_hidden_states=False) neck_hidden_sizes = [48, 96, 192, 384] elif 'base' in model_name: backbone_config = Dinov2Config.from_pretrained('facebook/dinov2-base', out_indices=[3, 6, 9, 12], apply_layernorm=False, reshape_hidden_states=False) neck_hidden_sizes 
= [96, 192, 384, 768] elif 'large' in model_name: backbone_config = Dinov2Config.from_pretrained('facebook/dinov2-large', out_indices=[5, 12, 18, 24], apply_layernorm=False, reshape_hidden_states=False) neck_hidden_sizes = [128, 256, 512, 1024] elif 'giant' in model_name: backbone_config = Dinov2Config.from_pretrained('facebook/dinov2-giant', out_indices=[10, 20, 30, 40], apply_layernorm=False, reshape_hidden_states=False) neck_hidden_sizes = [192, 384, 768, 1536] else: raise NotImplementedError('To do') config = DPTConfig(backbone_config=backbone_config, neck_hidden_sizes=neck_hidden_sizes, use_bias_in_fusion_residual=False, add_projection=True) return config def create_rename_keys_dpt(config): rename_keys = [] for i in range(4): rename_keys.append((f'decode_head.reassemble_blocks.projects.{i}.conv.weight', f'neck.reassemble_stage.layers.{i}.projection.weight')) rename_keys.append((f'decode_head.reassemble_blocks.projects.{i}.conv.bias', f'neck.reassemble_stage.layers.{i}.projection.bias')) rename_keys.append((f'decode_head.reassemble_blocks.readout_projects.{i}.0.weight', f'neck.reassemble_stage.readout_projects.{i}.0.weight')) rename_keys.append((f'decode_head.reassemble_blocks.readout_projects.{i}.0.bias', f'neck.reassemble_stage.readout_projects.{i}.0.bias')) if i != 2: rename_keys.append((f'decode_head.reassemble_blocks.resize_layers.{i}.weight', f'neck.reassemble_stage.layers.{i}.resize.weight')) rename_keys.append((f'decode_head.reassemble_blocks.resize_layers.{i}.bias', f'neck.reassemble_stage.layers.{i}.resize.bias')) for i in range(4): rename_keys.append((f'decode_head.fusion_blocks.{i}.project.conv.weight', f'neck.fusion_stage.layers.{i}.projection.weight')) rename_keys.append((f'decode_head.fusion_blocks.{i}.project.conv.bias', f'neck.fusion_stage.layers.{i}.projection.bias')) if i != 0: rename_keys.append((f'decode_head.fusion_blocks.{i}.res_conv_unit1.conv1.conv.weight', f'neck.fusion_stage.layers.{i}.residual_layer1.convolution1.weight')) rename_keys.append((f'decode_head.fusion_blocks.{i}.res_conv_unit1.conv2.conv.weight', f'neck.fusion_stage.layers.{i}.residual_layer1.convolution2.weight')) rename_keys.append((f'decode_head.fusion_blocks.{i}.res_conv_unit2.conv1.conv.weight', f'neck.fusion_stage.layers.{i}.residual_layer2.convolution1.weight')) rename_keys.append((f'decode_head.fusion_blocks.{i}.res_conv_unit2.conv2.conv.weight', f'neck.fusion_stage.layers.{i}.residual_layer2.convolution2.weight')) for i in range(4): rename_keys.append((f'decode_head.convs.{i}.conv.weight', f'neck.convs.{i}.weight')) rename_keys.append(('decode_head.project.conv.weight', 'head.projection.weight')) rename_keys.append(('decode_head.project.conv.bias', 'head.projection.bias')) for i in range(0, 5, 2): rename_keys.append((f'decode_head.conv_depth.head.{i}.weight', f'head.head.{i}.weight')) rename_keys.append((f'decode_head.conv_depth.head.{i}.bias', f'head.head.{i}.bias')) return rename_keys def create_rename_keys_backbone(config): rename_keys = [] rename_keys.append(('cls_token', 'backbone.embeddings.cls_token')) rename_keys.append(('mask_token', 'backbone.embeddings.mask_token')) rename_keys.append(('pos_embed', 'backbone.embeddings.position_embeddings')) rename_keys.append(('patch_embed.proj.weight', 'backbone.embeddings.patch_embeddings.projection.weight')) rename_keys.append(('patch_embed.proj.bias', 'backbone.embeddings.patch_embeddings.projection.bias')) for i in range(config.backbone_config.num_hidden_layers): rename_keys.append((f'blocks.{i}.norm1.weight', 
f'backbone.encoder.layer.{i}.norm1.weight')) rename_keys.append((f'blocks.{i}.norm1.bias', f'backbone.encoder.layer.{i}.norm1.bias')) rename_keys.append((f'blocks.{i}.norm2.weight', f'backbone.encoder.layer.{i}.norm2.weight')) rename_keys.append((f'blocks.{i}.norm2.bias', f'backbone.encoder.layer.{i}.norm2.bias')) if config.backbone_config.use_swiglu_ffn: rename_keys.append((f'blocks.{i}.mlp.w12.weight', f'backbone.encoder.layer.{i}.mlp.w12.weight')) rename_keys.append((f'blocks.{i}.mlp.w12.bias', f'backbone.encoder.layer.{i}.mlp.w12.bias')) rename_keys.append((f'blocks.{i}.mlp.w3.weight', f'backbone.encoder.layer.{i}.mlp.w3.weight')) rename_keys.append((f'blocks.{i}.mlp.w3.bias', f'backbone.encoder.layer.{i}.mlp.w3.bias')) else: rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'backbone.encoder.layer.{i}.mlp.fc1.weight')) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'backbone.encoder.layer.{i}.mlp.fc1.bias')) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'backbone.encoder.layer.{i}.mlp.fc2.weight')) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'backbone.encoder.layer.{i}.mlp.fc2.bias')) rename_keys.append((f'blocks.{i}.ls1.gamma', f'backbone.encoder.layer.{i}.layer_scale1.lambda1')) rename_keys.append((f'blocks.{i}.ls2.gamma', f'backbone.encoder.layer.{i}.layer_scale2.lambda1')) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'backbone.encoder.layer.{i}.attention.output.dense.weight')) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'backbone.encoder.layer.{i}.attention.output.dense.bias')) rename_keys.append(('norm.weight', 'backbone.layernorm.weight')) rename_keys.append(('norm.bias', 'backbone.layernorm.bias')) return rename_keys def read_in_q_k_v(state_dict, config): for i in range(config.backbone_config.num_hidden_layers): in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight') in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias') hidden_size = config.backbone_config.hidden_size state_dict[f'backbone.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[:hidden_size, :] state_dict[f'backbone.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[:hidden_size] state_dict[f'backbone.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[hidden_size:hidden_size * 2, :] state_dict[f'backbone.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[hidden_size:hidden_size * 2] state_dict[f'backbone.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-hidden_size:, :] state_dict[f'backbone.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-hidden_size:] def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def prepare_img(): url = 'https://dl.fbaipublicfiles.com/dinov2/images/example.jpg' im = Image.open(requests.get(url, stream=True).raw) return im name_to_url = {'dpt-dinov2-small-nyu': 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_nyu_dpt_head.pth', 'dpt-dinov2-small-kitti': 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_kitti_dpt_head.pth', 'dpt-dinov2-base-nyu': 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_nyu_dpt_head.pth', 'dpt-dinov2-base-kitti': 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_kitti_dpt_head.pth', 'dpt-dinov2-large-nyu': 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_nyu_dpt_head.pth', 'dpt-dinov2-large-kitti': 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_kitti_dpt_head.pth', 'dpt-dinov2-giant-nyu': 
'https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_nyu_dpt_head.pth', 'dpt-dinov2-giant-kitti': 'https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_kitti_dpt_head.pth'} def get_original_pixel_values(image): class CenterPadding: def __init__(self, multiple): super().__init__() self.multiple = multiple def _get_pad(self, size): new_size = math.ceil(size / self.multiple) * self.multiple pad_size = new_size - size pad_size_left = pad_size // 2 pad_size_right = pad_size - pad_size_left return (pad_size_left, pad_size_right) def __call__(self, img): pads = list(itertools.chain.from_iterable((self._get_pad(m) for m in img.shape[-2:][::-1]))) output = torch.nn.functional.pad(img, pads) return output def __repr__(self): return self.__class__.__name__ + '()' def make_depth_transform() -> transforms.Compose: return transforms.Compose([transforms.ToTensor(), lambda x: 255.0 * x[:3], transforms.Normalize(mean=(123.675, 116.28, 103.53), std=(58.395, 57.12, 57.375)), CenterPadding(multiple=14)]) transform = make_depth_transform() original_pixel_values = transform(image).unsqueeze(0) return original_pixel_values @torch.no_grad() def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, verify_logits): checkpoint_url = name_to_url[model_name] config = get_dpt_config(model_name) print('URL:', checkpoint_url) dpt_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict'] rename_keys = create_rename_keys_dpt(config) for (src, dest) in rename_keys: rename_key(dpt_state_dict, src, dest) if 'small' in model_name: original_model = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14') elif 'base' in model_name: original_model = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14') elif 'large' in model_name: original_model = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14') elif 'giant' in model_name: original_model = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14') else: raise NotImplementedError('To do') original_model.eval() backbone_state_dict = original_model.state_dict() rename_keys = create_rename_keys_backbone(config) for (src, dest) in rename_keys: rename_key(backbone_state_dict, src, dest) read_in_q_k_v(backbone_state_dict, config) for (key, val) in backbone_state_dict.copy().items(): val = backbone_state_dict.pop(key) if 'w12' in key: key = key.replace('w12', 'weights_in') if 'w3' in key: key = key.replace('w3', 'weights_out') backbone_state_dict[key] = val state_dict = {**backbone_state_dict, **dpt_state_dict} model = DPTForDepthEstimation(config) (missing_keys, unexpected_keys) = model.load_state_dict(state_dict, strict=False) print('Missing keys:', missing_keys) print('Unexpected keys:', unexpected_keys) assert missing_keys == ['neck.fusion_stage.layers.0.residual_layer1.convolution1.weight', 'neck.fusion_stage.layers.0.residual_layer1.convolution2.weight'] model.eval() processor = DPTImageProcessor(do_resize=False, do_rescale=False, do_pad=True, size_divisor=14, do_normalize=True, image_mean=(123.675, 116.28, 103.53), image_std=(58.395, 57.12, 57.375)) image = prepare_img() pixel_values = processor(image, return_tensors='pt').pixel_values.float() original_pixel_values = get_original_pixel_values(image) assert torch.allclose(pixel_values, original_pixel_values) with torch.no_grad(): outputs = model(pixel_values) predicted_depth = outputs.predicted_depth print('Shape of predicted depth:', predicted_depth.shape) print('First values of predicted depth:', predicted_depth[0, :3, :3]) if 
verify_logits: if model_name == 'dpt-dinov2-small-nyu': expected_shape = torch.Size([1, 576, 736]) expected_slice = torch.tensor([[3.3576, 3.4741, 3.4345], [3.4324, 3.5012, 3.2775], [3.256, 3.3563, 3.2354]]) assert predicted_depth.shape == torch.Size(expected_shape) assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-05) print('Looks ok!') if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model and processor to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print('Pushing model and processor to hub...') model.push_to_hub(repo_id=f'facebook/{model_name}') processor.push_to_hub(repo_id=f'facebook/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='dpt-dinov2-small-nyu', type=str, choices=name_to_url.keys(), help="Name of the model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub after conversion.') parser.add_argument('--verify_logits', action='store_true', required=False, help='Whether to verify the predicted depth values against the expected values after conversion.') args = parser.parse_args() convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.verify_logits) # File: transformers-main/src/transformers/models/dpt/convert_dpt_beit_to_hf.py """""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import BeitConfig, DPTConfig, DPTForDepthEstimation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dpt_config(model_name): hidden_size = 768 num_hidden_layers = 12 num_attention_heads = 12 intermediate_size = 3072 out_features = ['stage3', 'stage6', 'stage9', 'stage12'] if 'large' in model_name: hidden_size = 1024 num_hidden_layers = 24 num_attention_heads = 16 intermediate_size = 4096 out_features = ['stage6', 'stage12', 'stage18', 'stage24'] if '512' in model_name: image_size = 512 elif '384' in model_name: image_size = 384 else: raise ValueError('Model not supported') backbone_config = BeitConfig(image_size=image_size, num_hidden_layers=num_hidden_layers, hidden_size=hidden_size, intermediate_size=intermediate_size, num_attention_heads=num_attention_heads, use_relative_position_bias=True, reshape_hidden_states=False, out_features=out_features) neck_hidden_sizes = [256, 512, 1024, 1024] if 'large' in model_name else [96, 192, 384, 768] config = DPTConfig(backbone_config=backbone_config, neck_hidden_sizes=neck_hidden_sizes) return (config, image_size) def create_rename_keys(config): rename_keys = [] rename_keys.append(('pretrained.model.cls_token', 'backbone.embeddings.cls_token')) rename_keys.append(('pretrained.model.patch_embed.proj.weight', 'backbone.embeddings.patch_embeddings.projection.weight')) rename_keys.append(('pretrained.model.patch_embed.proj.bias', 'backbone.embeddings.patch_embeddings.projection.bias')) for i in range(config.backbone_config.num_hidden_layers): rename_keys.append((f'pretrained.model.blocks.{i}.gamma_1', f'backbone.encoder.layer.{i}.lambda_1')) rename_keys.append((f'pretrained.model.blocks.{i}.gamma_2', f'backbone.encoder.layer.{i}.lambda_2')) 
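# The per-block renames continue below: BEiT layer norms, MLP projections, attention output projections,
# and relative position bias tables/indices are mapped onto the corresponding
# backbone.encoder.layer.{i}.* parameter names expected by the HF DPT backbone.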
rename_keys.append((f'pretrained.model.blocks.{i}.norm1.weight', f'backbone.encoder.layer.{i}.layernorm_before.weight')) rename_keys.append((f'pretrained.model.blocks.{i}.norm1.bias', f'backbone.encoder.layer.{i}.layernorm_before.bias')) rename_keys.append((f'pretrained.model.blocks.{i}.norm2.weight', f'backbone.encoder.layer.{i}.layernorm_after.weight')) rename_keys.append((f'pretrained.model.blocks.{i}.norm2.bias', f'backbone.encoder.layer.{i}.layernorm_after.bias')) rename_keys.append((f'pretrained.model.blocks.{i}.mlp.fc1.weight', f'backbone.encoder.layer.{i}.intermediate.dense.weight')) rename_keys.append((f'pretrained.model.blocks.{i}.mlp.fc1.bias', f'backbone.encoder.layer.{i}.intermediate.dense.bias')) rename_keys.append((f'pretrained.model.blocks.{i}.mlp.fc2.weight', f'backbone.encoder.layer.{i}.output.dense.weight')) rename_keys.append((f'pretrained.model.blocks.{i}.mlp.fc2.bias', f'backbone.encoder.layer.{i}.output.dense.bias')) rename_keys.append((f'pretrained.model.blocks.{i}.attn.proj.weight', f'backbone.encoder.layer.{i}.attention.output.dense.weight')) rename_keys.append((f'pretrained.model.blocks.{i}.attn.proj.bias', f'backbone.encoder.layer.{i}.attention.output.dense.bias')) rename_keys.append((f'pretrained.model.blocks.{i}.attn.relative_position_bias_table', f'backbone.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table')) rename_keys.append((f'pretrained.model.blocks.{i}.attn.relative_position_index', f'backbone.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index')) for i in range(4): rename_keys.append((f'pretrained.act_postprocess{i + 1}.0.project.0.weight', f'neck.reassemble_stage.readout_projects.{i}.0.weight')) rename_keys.append((f'pretrained.act_postprocess{i + 1}.0.project.0.bias', f'neck.reassemble_stage.readout_projects.{i}.0.bias')) rename_keys.append((f'pretrained.act_postprocess{i + 1}.3.weight', f'neck.reassemble_stage.layers.{i}.projection.weight')) rename_keys.append((f'pretrained.act_postprocess{i + 1}.3.bias', f'neck.reassemble_stage.layers.{i}.projection.bias')) if i != 2: rename_keys.append((f'pretrained.act_postprocess{i + 1}.4.weight', f'neck.reassemble_stage.layers.{i}.resize.weight')) rename_keys.append((f'pretrained.act_postprocess{i + 1}.4.bias', f'neck.reassemble_stage.layers.{i}.resize.bias')) mapping = {1: 3, 2: 2, 3: 1, 4: 0} for i in range(1, 5): j = mapping[i] rename_keys.append((f'scratch.refinenet{i}.out_conv.weight', f'neck.fusion_stage.layers.{j}.projection.weight')) rename_keys.append((f'scratch.refinenet{i}.out_conv.bias', f'neck.fusion_stage.layers.{j}.projection.bias')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit1.conv1.weight', f'neck.fusion_stage.layers.{j}.residual_layer1.convolution1.weight')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit1.conv1.bias', f'neck.fusion_stage.layers.{j}.residual_layer1.convolution1.bias')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit1.conv2.weight', f'neck.fusion_stage.layers.{j}.residual_layer1.convolution2.weight')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit1.conv2.bias', f'neck.fusion_stage.layers.{j}.residual_layer1.convolution2.bias')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit2.conv1.weight', f'neck.fusion_stage.layers.{j}.residual_layer2.convolution1.weight')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit2.conv1.bias', f'neck.fusion_stage.layers.{j}.residual_layer2.convolution1.bias')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit2.conv2.weight', 
f'neck.fusion_stage.layers.{j}.residual_layer2.convolution2.weight')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit2.conv2.bias', f'neck.fusion_stage.layers.{j}.residual_layer2.convolution2.bias')) for i in range(4): rename_keys.append((f'scratch.layer{i + 1}_rn.weight', f'neck.convs.{i}.weight')) for i in range(0, 5, 2): rename_keys.append((f'scratch.output_conv.{i}.weight', f'head.head.{i}.weight')) rename_keys.append((f'scratch.output_conv.{i}.bias', f'head.head.{i}.bias')) return rename_keys def remove_ignore_keys_(state_dict): ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(k, None) def read_in_q_k_v(state_dict, config): hidden_size = config.backbone_config.hidden_size for i in range(config.backbone_config.num_hidden_layers): in_proj_weight = state_dict.pop(f'pretrained.model.blocks.{i}.attn.qkv.weight') q_bias = state_dict.pop(f'pretrained.model.blocks.{i}.attn.q_bias') v_bias = state_dict.pop(f'pretrained.model.blocks.{i}.attn.v_bias') state_dict[f'backbone.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[:hidden_size, :] state_dict[f'backbone.encoder.layer.{i}.attention.attention.query.bias'] = q_bias state_dict[f'backbone.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[hidden_size:hidden_size * 2, :] state_dict[f'backbone.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-hidden_size:, :] state_dict[f'backbone.encoder.layer.{i}.attention.attention.value.bias'] = v_bias def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): name_to_url = {'dpt-beit-large-512': 'https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt', 'dpt-beit-large-384': 'https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_384.pt', 'dpt-beit-base-384': 'https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_base_384.pt'} checkpoint_url = name_to_url[model_name] (config, image_size) = get_dpt_config(model_name) state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu') remove_ignore_keys_(state_dict) rename_keys = create_rename_keys(config) for (src, dest) in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config) model = DPTForDepthEstimation(config) (missing_keys, unexpected_keys) = model.load_state_dict(state_dict, strict=False) print('Missing keys:', missing_keys) print('Unexpected keys:', unexpected_keys) assert missing_keys == [] model.eval() processor = DPTImageProcessor(size={'height': image_size, 'width': image_size}, keep_aspect_ratio=False, ensure_multiple_of=32) image = prepare_img() pixel_values = processor(image, return_tensors='pt').pixel_values print('First values of pixel values:', pixel_values[0, 0, :3, :3]) print('Mean of pixel values:', pixel_values.mean().item()) print('Shape of pixel values:', pixel_values.shape) import requests from PIL import Image from torchvision import transforms url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) transforms = transforms.Compose([transforms.Resize((image_size, image_size)), transforms.ToTensor()]) pixel_values = transforms(image).unsqueeze(0) with torch.no_grad(): outputs = model(pixel_values) predicted_depth = 
outputs.predicted_depth print('Shape of predicted depth:', predicted_depth.shape) print('First values of predicted depth:', predicted_depth[0, :3, :3]) if model_name == 'dpt-beit-large-512': expected_shape = torch.Size([1, 512, 512]) expected_slice = torch.tensor([[2804.626, 2792.5708, 2812.9263], [2772.0288, 2780.1118, 2796.2529], [2748.1094, 2766.6558, 2766.9834]]) elif model_name == 'dpt-beit-large-384': expected_shape = torch.Size([1, 384, 384]) expected_slice = torch.tensor([[1783.2273, 1780.5729, 1792.6453], [1759.9817, 1765.5359, 1778.5002], [1739.1633, 1754.7903, 1757.199]]) elif model_name == 'dpt-beit-base-384': expected_shape = torch.Size([1, 384, 384]) expected_slice = torch.tensor([[2898.4482, 2891.375, 2904.8079], [2858.6685, 2877.2615, 2894.4507], [2842.1235, 2854.1023, 2861.6328]]) assert predicted_depth.shape == torch.Size(expected_shape) assert torch.allclose(predicted_depth[0, :3, :3], expected_slice) print('Looks ok!') if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model and processor to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print('Pushing model and processor to hub...') model.push_to_hub(repo_id=f'nielsr/{model_name}') processor.push_to_hub(repo_id=f'nielsr/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='dpt-beit-large-512', type=str, choices=['dpt-beit-large-512', 'dpt-beit-large-384', 'dpt-beit-base-384'], help="Name of the model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub after conversion.') args = parser.parse_args() convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/dpt/convert_dpt_hybrid_to_pytorch.py """""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dpt_config(checkpoint_url): config = DPTConfig(embedding_type='hybrid') if 'large' in checkpoint_url: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 config.backbone_out_indices = [5, 11, 17, 23] config.neck_hidden_sizes = [256, 512, 1024, 1024] expected_shape = (1, 384, 384) if 'nyu' in checkpoint_url or 'midas' in checkpoint_url: config.hidden_size = 768 config.reassemble_factors = [1, 1, 1, 0.5] config.neck_hidden_sizes = [256, 512, 768, 768] config.num_labels = 150 config.patch_size = 16 expected_shape = (1, 384, 384) config.use_batch_norm_in_fusion_residual = False config.readout_type = 'project' if 'ade' in checkpoint_url: config.use_batch_norm_in_fusion_residual = True config.hidden_size = 768 config.reassemble_stage = [1, 1, 1, 0.5] config.num_labels = 150 config.patch_size = 16 repo_id = 'huggingface/label-files' filename = 'ade20k-id2label.json' id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type='dataset')).read_text()) id2label = {int(k): v for (k, v) in 
id2label.items()} config.id2label = id2label config.label2id = {v: k for (k, v) in id2label.items()} expected_shape = [1, 150, 480, 480] return (config, expected_shape) def remove_ignore_keys_(state_dict): ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(k, None) def rename_key(name): if 'pretrained.model' in name and 'cls_token' not in name and ('pos_embed' not in name) and ('patch_embed' not in name): name = name.replace('pretrained.model', 'dpt.encoder') if 'pretrained.model' in name: name = name.replace('pretrained.model', 'dpt.embeddings') if 'patch_embed' in name: name = name.replace('patch_embed', '') if 'pos_embed' in name: name = name.replace('pos_embed', 'position_embeddings') if 'attn.proj' in name: name = name.replace('attn.proj', 'attention.output.dense') if 'proj' in name and 'project' not in name: name = name.replace('proj', 'projection') if 'blocks' in name: name = name.replace('blocks', 'layer') if 'mlp.fc1' in name: name = name.replace('mlp.fc1', 'intermediate.dense') if 'mlp.fc2' in name: name = name.replace('mlp.fc2', 'output.dense') if 'norm1' in name and 'backbone' not in name: name = name.replace('norm1', 'layernorm_before') if 'norm2' in name and 'backbone' not in name: name = name.replace('norm2', 'layernorm_after') if 'scratch.output_conv' in name: name = name.replace('scratch.output_conv', 'head') if 'scratch' in name: name = name.replace('scratch', 'neck') if 'layer1_rn' in name: name = name.replace('layer1_rn', 'convs.0') if 'layer2_rn' in name: name = name.replace('layer2_rn', 'convs.1') if 'layer3_rn' in name: name = name.replace('layer3_rn', 'convs.2') if 'layer4_rn' in name: name = name.replace('layer4_rn', 'convs.3') if 'refinenet' in name: layer_idx = int(name[len('neck.refinenet'):len('neck.refinenet') + 1]) name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx - 4)}') if 'out_conv' in name: name = name.replace('out_conv', 'projection') if 'resConfUnit1' in name: name = name.replace('resConfUnit1', 'residual_layer1') if 'resConfUnit2' in name: name = name.replace('resConfUnit2', 'residual_layer2') if 'conv1' in name: name = name.replace('conv1', 'convolution1') if 'conv2' in name: name = name.replace('conv2', 'convolution2') if 'pretrained.act_postprocess1.0.project.0' in name: name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0') if 'pretrained.act_postprocess2.0.project.0' in name: name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0') if 'pretrained.act_postprocess3.0.project.0' in name: name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0') if 'pretrained.act_postprocess4.0.project.0' in name: name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0') if 'pretrained.act_postprocess1.3' in name: name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection') if 'pretrained.act_postprocess1.4' in name: name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize') if 'pretrained.act_postprocess2.3' in name: name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection') if 'pretrained.act_postprocess2.4' in name: name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize') if 'pretrained.act_postprocess3.3' in name: name = 
name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection') if 'pretrained.act_postprocess4.3' in name: name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection') if 'pretrained.act_postprocess4.4' in name: name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize') if 'pretrained' in name: name = name.replace('pretrained', 'dpt') if 'bn' in name: name = name.replace('bn', 'batch_norm') if 'head' in name: name = name.replace('head', 'head.head') if 'encoder.norm' in name: name = name.replace('encoder.norm', 'layernorm') if 'auxlayer' in name: name = name.replace('auxlayer', 'auxiliary_head.head') if 'backbone' in name: name = name.replace('backbone', 'backbone.bit.encoder') if '..' in name: name = name.replace('..', '.') if 'stem.conv' in name: name = name.replace('stem.conv', 'bit.embedder.convolution') if 'blocks' in name: name = name.replace('blocks', 'layers') if 'convolution' in name and 'backbone' in name: name = name.replace('convolution', 'conv') if 'layer' in name and 'backbone' in name: name = name.replace('layer', 'layers') if 'backbone.bit.encoder.bit' in name: name = name.replace('backbone.bit.encoder.bit', 'backbone.bit') if 'embedder.conv' in name: name = name.replace('embedder.conv', 'embedder.convolution') if 'backbone.bit.encoder.stem.norm' in name: name = name.replace('backbone.bit.encoder.stem.norm', 'backbone.bit.embedder.norm') return name def read_in_q_k_v(state_dict, config): for i in range(config.num_hidden_layers): in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight') in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias') state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[:config.hidden_size, :] state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[:config.hidden_size] state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size:config.hidden_size * 2, :] state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[config.hidden_size:config.hidden_size * 2] state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size:, :] state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size:] def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction): (config, expected_shape) = get_dpt_config(checkpoint_url) state_dict = torch.load(checkpoint_url, map_location='cpu') remove_ignore_keys_(state_dict) for key in state_dict.copy().keys(): val = state_dict.pop(key) state_dict[rename_key(key)] = val read_in_q_k_v(state_dict, config) model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config) model.load_state_dict(state_dict) model.eval() size = 480 if 'ade' in checkpoint_url else 384 image_processor = DPTImageProcessor(size=size) image = prepare_img() encoding = image_processor(image, return_tensors='pt') outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth if show_prediction: prediction = torch.nn.functional.interpolate(outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode='bicubic', 
align_corners=False).squeeze().cpu().numpy() Image.fromarray(prediction / prediction.max() * 255).show() if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) print(f'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub('ybelkada/dpt-hybrid-midas') image_processor.push_to_hub('ybelkada/dpt-hybrid-midas') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help="URL of the original DPT checkpoint you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true') parser.add_argument('--model_name', default='dpt-large', type=str, help="Name of the model, in case you're pushing to the hub.") parser.add_argument('--show_prediction', action='store_true') args = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction) # File: transformers-main/src/transformers/models/dpt/convert_dpt_swinv2_to_hf.py """""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTImageProcessor, Swinv2Config from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dpt_config(model_name): if 'tiny' in model_name: embed_dim = 96 depths = (2, 2, 6, 2) num_heads = (3, 6, 12, 24) window_size = 16 pretrained_window_sizes = (0, 0, 0, 0) elif 'base' in model_name: embed_dim = 128 depths = (2, 2, 18, 2) num_heads = (4, 8, 16, 32) window_size = 24 pretrained_window_sizes = (12, 12, 12, 6) elif 'large' in model_name: embed_dim = 192 depths = (2, 2, 18, 2) num_heads = (6, 12, 24, 48) window_size = 24 pretrained_window_sizes = (12, 12, 12, 6) if '384' in model_name: image_size = 384 elif '256' in model_name: image_size = 256 else: raise ValueError('Model not supported, to do') backbone_config = Swinv2Config(image_size=image_size, embed_dim=embed_dim, depths=depths, window_size=window_size, pretrained_window_sizes=pretrained_window_sizes, num_heads=num_heads, out_features=['stage1', 'stage2', 'stage3', 'stage4']) if model_name == 'dpt-swinv2-tiny-256': neck_hidden_sizes = [96, 192, 384, 768] elif model_name == 'dpt-swinv2-base-384': neck_hidden_sizes = [128, 256, 512, 1024] elif model_name == 'dpt-swinv2-large-384': neck_hidden_sizes = [192, 384, 768, 1536] config = DPTConfig(backbone_config=backbone_config, neck_hidden_sizes=neck_hidden_sizes) return (config, image_size) def create_rename_keys(config): rename_keys = [] rename_keys.append(('pretrained.model.patch_embed.proj.weight', 'backbone.embeddings.patch_embeddings.projection.weight')) rename_keys.append(('pretrained.model.patch_embed.proj.bias', 'backbone.embeddings.patch_embeddings.projection.bias')) rename_keys.append(('pretrained.model.patch_embed.norm.weight', 'backbone.embeddings.norm.weight')) rename_keys.append(('pretrained.model.patch_embed.norm.bias', 'backbone.embeddings.norm.bias')) for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): 
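# Note on the loop body that follows (descriptive comment, not part of the original script): for each
# Swin-v2 stage i and block j, the original MiDaS keys under 'pretrained.model.layers.{i}.blocks.{j}.*'
# (attn logit_scale, cpb_mlp, q_bias/v_bias, proj, norm1/norm2, mlp.fc1/fc2) are renamed onto the
# matching 'backbone.encoder.layers.{i}.blocks.{j}.*' attention/layernorm/intermediate/output modules
# of the HF Swinv2 backbone, and the downsample (patch-merging) weights are remapped for the first
# three stages only.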
rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.attn.logit_scale', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.logit_scale')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.attn.cpb_mlp.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.continuous_position_bias_mlp.0.weight')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.attn.cpb_mlp.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.continuous_position_bias_mlp.0.bias')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.attn.cpb_mlp.2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.continuous_position_bias_mlp.2.weight')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.attn.q_bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.attn.v_bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.attn.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.attn.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.mlp.fc1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.mlp.fc1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.mlp.fc2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.mlp.fc2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight')) rename_keys.append((f'pretrained.model.layers.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias')) if i in [0, 1, 2]: rename_keys.append((f'pretrained.model.layers.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight')) rename_keys.append((f'pretrained.model.layers.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight')) rename_keys.append((f'pretrained.model.layers.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias')) mapping = {1: 3, 2: 2, 3: 1, 4: 0} for i in range(1, 5): j = mapping[i] rename_keys.append((f'scratch.refinenet{i}.out_conv.weight', f'neck.fusion_stage.layers.{j}.projection.weight')) rename_keys.append((f'scratch.refinenet{i}.out_conv.bias', f'neck.fusion_stage.layers.{j}.projection.bias')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit1.conv1.weight', f'neck.fusion_stage.layers.{j}.residual_layer1.convolution1.weight')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit1.conv1.bias', f'neck.fusion_stage.layers.{j}.residual_layer1.convolution1.bias')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit1.conv2.weight', 
f'neck.fusion_stage.layers.{j}.residual_layer1.convolution2.weight')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit1.conv2.bias', f'neck.fusion_stage.layers.{j}.residual_layer1.convolution2.bias')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit2.conv1.weight', f'neck.fusion_stage.layers.{j}.residual_layer2.convolution1.weight')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit2.conv1.bias', f'neck.fusion_stage.layers.{j}.residual_layer2.convolution1.bias')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit2.conv2.weight', f'neck.fusion_stage.layers.{j}.residual_layer2.convolution2.weight')) rename_keys.append((f'scratch.refinenet{i}.resConfUnit2.conv2.bias', f'neck.fusion_stage.layers.{j}.residual_layer2.convolution2.bias')) for i in range(4): rename_keys.append((f'scratch.layer{i + 1}_rn.weight', f'neck.convs.{i}.weight')) for i in range(0, 5, 2): rename_keys.append((f'scratch.output_conv.{i}.weight', f'head.head.{i}.weight')) rename_keys.append((f'scratch.output_conv.{i}.bias', f'head.head.{i}.bias')) return rename_keys def remove_ignore_keys_(state_dict): ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(k, None) def read_in_q_k_v(state_dict, config, model): for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): dim = model.backbone.encoder.layers[i].blocks[j].attention.self.all_head_size in_proj_weight = state_dict.pop(f'pretrained.model.layers.{i}.blocks.{j}.attn.qkv.weight') state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :] state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim:dim * 2, :] state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :] def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, verify_logits, push_to_hub): name_to_url = {'dpt-swinv2-tiny-256': 'https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_tiny_256.pt', 'dpt-swinv2-base-384': 'https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_base_384.pt', 'dpt-swinv2-large-384': 'https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_large_384.pt'} checkpoint_url = name_to_url[model_name] (config, image_size) = get_dpt_config(model_name) state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu') model = DPTForDepthEstimation(config) remove_ignore_keys_(state_dict) rename_keys = create_rename_keys(config) for (src, dest) in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, model) (missing_keys, unexpected_keys) = model.load_state_dict(state_dict, strict=False) print('Missing keys:', missing_keys) print('Unexpected keys:', unexpected_keys) model.eval() processor = DPTImageProcessor(size={'height': image_size, 'width': image_size}) image = prepare_img() processor(image, return_tensors='pt') if verify_logits: from torchvision import transforms url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) transforms = transforms.Compose([transforms.Resize((image_size, image_size)), transforms.ToTensor()]) pixel_values = transforms(image).unsqueeze(0) with 
torch.no_grad(): outputs = model(pixel_values) predicted_depth = outputs.predicted_depth print('Shape of predicted depth:', predicted_depth.shape) print('First values of predicted depth:', predicted_depth[0, :3, :3]) if model_name == 'dpt-swinv2-base-384': expected_shape = torch.Size([1, 384, 384]) expected_slice = torch.tensor([[1998.5575, 1997.3887, 2009.2981], [1952.8607, 1979.6488, 2001.0854], [1953.7697, 1961.7711, 1968.8904]]) elif model_name == 'dpt-swinv2-tiny-256': expected_shape = torch.Size([1, 256, 256]) expected_slice = torch.tensor([[978.9163, 976.5215, 978.5349], [974.1859, 971.7249, 975.8046], [971.3419, 970.3118, 971.683]]) elif model_name == 'dpt-swinv2-large-384': expected_shape = torch.Size([1, 384, 384]) expected_slice = torch.tensor([[1203.7206, 1200.1495, 1197.8234], [1196.2484, 1183.5033, 1186.464], [1178.8131, 1182.326, 1174.3975]]) assert predicted_depth.shape == torch.Size(expected_shape) assert torch.allclose(predicted_depth[0, :3, :3], expected_slice) print('Looks ok!') if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model and processor to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print('Pushing model and processor to hub...') model.push_to_hub(repo_id=f'Intel/{model_name}') processor.push_to_hub(repo_id=f'Intel/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='dpt-swinv2-base-384', type=str, choices=['dpt-swinv2-tiny-256', 'dpt-swinv2-base-384', 'dpt-swinv2-large-384'], help="Name of the model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--verify_logits', action='store_true', help='Whether to verify logits after conversion.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub after conversion.') args = parser.parse_args() convert_dpt_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.verify_logits, args.push_to_hub) # File: transformers-main/src/transformers/models/dpt/convert_dpt_to_pytorch.py """""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dpt_config(checkpoint_url): config = DPTConfig() if 'large' in checkpoint_url: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 config.backbone_out_indices = [5, 11, 17, 23] config.neck_hidden_sizes = [256, 512, 1024, 1024] expected_shape = (1, 384, 384) if 'ade' in checkpoint_url: config.use_batch_norm_in_fusion_residual = True config.num_labels = 150 repo_id = 'huggingface/label-files' filename = 'ade20k-id2label.json' id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type='dataset')).read_text()) id2label = {int(k): v for (k, v) in id2label.items()} config.id2label = id2label config.label2id = {v: k for (k, v) in id2label.items()} expected_shape = [1, 150, 480, 480] return (config, expected_shape) def remove_ignore_keys_(state_dict): ignore_keys = ['pretrained.model.head.weight', 
'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(k, None) def rename_key(name): if 'pretrained.model' in name and 'cls_token' not in name and ('pos_embed' not in name) and ('patch_embed' not in name): name = name.replace('pretrained.model', 'dpt.encoder') if 'pretrained.model' in name: name = name.replace('pretrained.model', 'dpt.embeddings') if 'patch_embed' in name: name = name.replace('patch_embed', 'patch_embeddings') if 'pos_embed' in name: name = name.replace('pos_embed', 'position_embeddings') if 'attn.proj' in name: name = name.replace('attn.proj', 'attention.output.dense') if 'proj' in name and 'project' not in name: name = name.replace('proj', 'projection') if 'blocks' in name: name = name.replace('blocks', 'layer') if 'mlp.fc1' in name: name = name.replace('mlp.fc1', 'intermediate.dense') if 'mlp.fc2' in name: name = name.replace('mlp.fc2', 'output.dense') if 'norm1' in name: name = name.replace('norm1', 'layernorm_before') if 'norm2' in name: name = name.replace('norm2', 'layernorm_after') if 'scratch.output_conv' in name: name = name.replace('scratch.output_conv', 'head') if 'scratch' in name: name = name.replace('scratch', 'neck') if 'layer1_rn' in name: name = name.replace('layer1_rn', 'convs.0') if 'layer2_rn' in name: name = name.replace('layer2_rn', 'convs.1') if 'layer3_rn' in name: name = name.replace('layer3_rn', 'convs.2') if 'layer4_rn' in name: name = name.replace('layer4_rn', 'convs.3') if 'refinenet' in name: layer_idx = int(name[len('neck.refinenet'):len('neck.refinenet') + 1]) name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx - 4)}') if 'out_conv' in name: name = name.replace('out_conv', 'projection') if 'resConfUnit1' in name: name = name.replace('resConfUnit1', 'residual_layer1') if 'resConfUnit2' in name: name = name.replace('resConfUnit2', 'residual_layer2') if 'conv1' in name: name = name.replace('conv1', 'convolution1') if 'conv2' in name: name = name.replace('conv2', 'convolution2') if 'pretrained.act_postprocess1.0.project.0' in name: name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0') if 'pretrained.act_postprocess2.0.project.0' in name: name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0') if 'pretrained.act_postprocess3.0.project.0' in name: name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0') if 'pretrained.act_postprocess4.0.project.0' in name: name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0') if 'pretrained.act_postprocess1.3' in name: name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection') if 'pretrained.act_postprocess1.4' in name: name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize') if 'pretrained.act_postprocess2.3' in name: name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection') if 'pretrained.act_postprocess2.4' in name: name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize') if 'pretrained.act_postprocess3.3' in name: name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection') if 'pretrained.act_postprocess4.3' in name: name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection') if 'pretrained.act_postprocess4.4' in name: name = 
name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize') if 'pretrained' in name: name = name.replace('pretrained', 'dpt') if 'bn' in name: name = name.replace('bn', 'batch_norm') if 'head' in name: name = name.replace('head', 'head.head') if 'encoder.norm' in name: name = name.replace('encoder.norm', 'layernorm') if 'auxlayer' in name: name = name.replace('auxlayer', 'auxiliary_head.head') return name def read_in_q_k_v(state_dict, config): for i in range(config.num_hidden_layers): in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight') in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias') state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[:config.hidden_size, :] state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[:config.hidden_size] state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size:config.hidden_size * 2, :] state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[config.hidden_size:config.hidden_size * 2] state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size:, :] state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size:] def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name): (config, expected_shape) = get_dpt_config(checkpoint_url) state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu') remove_ignore_keys_(state_dict) for key in state_dict.copy().keys(): val = state_dict.pop(key) state_dict[rename_key(key)] = val read_in_q_k_v(state_dict, config) model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config) model.load_state_dict(state_dict) model.eval() size = 480 if 'ade' in checkpoint_url else 384 image_processor = DPTImageProcessor(size=size) image = prepare_img() encoding = image_processor(image, return_tensors='pt') outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.385, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]]) if 'ade' in checkpoint_url: expected_slice = torch.tensor([[4.048, 4.242, 4.436], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]]) assert outputs.shape == torch.Size(expected_shape) assert torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=0.0001) if 'ade' in checkpoint_url else torch.allclose(outputs[0, :3, :3], expected_slice) print('Looks ok!') if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) print(f'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print('Pushing model to hub...') model.push_to_hub(repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True) image_processor.push_to_hub(repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True) if __name__ == '__main__': parser = argparse.ArgumentParser() 
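# Illustrative invocation of this conversion script (a sketch: the checkpoint URL and model name are
# the script defaults declared just below; the dump folder is a hypothetical local path):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large-converted --model_name dpt-large
# The dumped folder can afterwards be reloaded with DPTForDepthEstimation.from_pretrained(...) and
# DPTImageProcessor.from_pretrained(...) pointed at the same path.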
parser.add_argument('--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help="URL of the original DPT checkpoint you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true') parser.add_argument('--model_name', default='dpt-large', type=str, required=False, help="Name of the model, in case you're pushing to the hub.") args = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name) # File: transformers-main/src/transformers/models/dpt/feature_extraction_dpt.py """""" import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor logger = logging.get_logger(__name__) class DPTFeatureExtractor(DPTImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use DPTImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs) # File: transformers-main/src/transformers/models/dpt/image_processing_dpt.py """""" import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import pad, resize, to_channel_dimension_format from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL logger = logging.get_logger(__name__) def get_resize_output_image_size(input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]: def constrain_to_multiple_of(val, multiple, min_val=0, max_val=None): x = round(val / multiple) * multiple if max_val is not None and x > max_val: x = math.floor(val / multiple) * multiple if x < min_val: x = math.ceil(val / multiple) * multiple return x output_size = (output_size, output_size) if isinstance(output_size, int) else output_size (input_height, input_width) = get_image_size(input_image, input_data_format) (output_height, output_width) = output_size scale_height = output_height / input_height scale_width = output_width / input_width if keep_aspect_ratio: if abs(1 - scale_width) < abs(1 - scale_height): scale_height = scale_width else: scale_width = scale_height new_height = constrain_to_multiple_of(scale_height * input_height, multiple=multiple) new_width = constrain_to_multiple_of(scale_width * input_width, multiple=multiple) return (new_height, new_width) class DPTImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, keep_aspect_ratio: bool=False, ensure_multiple_of: int=1, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, 
List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_pad: bool=False, size_divisor: int=None, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 384, 'width': 384} size = get_size_dict(size) self.do_resize = do_resize self.size = size self.keep_aspect_ratio = keep_aspect_ratio self.ensure_multiple_of = ensure_multiple_of self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.do_pad = do_pad self.size_divisor = size_divisor def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool=False, ensure_multiple_of: int=1, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size) if 'height' not in size or 'width' not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}") output_size = get_resize_output_image_size(image, output_size=(size['height'], size['width']), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def pad_image(self, image: np.array, size_divisor: int, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None): def _get_pad(size, size_divisor): new_size = math.ceil(size / size_divisor) * size_divisor pad_size = new_size - size pad_size_left = pad_size // 2 pad_size_right = pad_size - pad_size_left return (pad_size_left, pad_size_right) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) (height, width) = get_image_size(image, input_data_format) (pad_size_left, pad_size_right) = _get_pad(height, size_divisor) (pad_size_top, pad_size_bottom) = _get_pad(width, size_divisor) return pad(image, ((pad_size_left, pad_size_right), (pad_size_top, pad_size_bottom)), data_format=data_format) @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: bool=None, size: int=None, keep_aspect_ratio: bool=None, ensure_multiple_of: int=None, resample: PILImageResampling=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_pad: bool=None, size_divisor: int=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize 
= do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad size_divisor = size_divisor if size_divisor is not None else self.size_divisor images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=size_divisor, do_resize=do_resize, size=size, resample=resample) images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image=image, size=size, resample=resample, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=ensure_multiple_of, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] if do_pad: images = [self.pad_image(image=image, size_divisor=size_divisor, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors) def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple]=None): logits = outputs.logits if target_sizes is not None: if len(logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') if is_torch_tensor(target_sizes): target_sizes = target_sizes.numpy() semantic_segmentation = [] for idx in range(len(logits)): resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = logits.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation # File: transformers-main/src/transformers/models/dpt/modeling_dpt.py """""" import collections.abc import math from dataclasses import dataclass from typing import List, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput, SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, 
logging, torch_int from ...utils.backbone_utils import load_backbone from .configuration_dpt import DPTConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'DPTConfig' _CHECKPOINT_FOR_DOC = 'Intel/dpt-large' _EXPECTED_OUTPUT_SHAPE = [1, 577, 1024] @dataclass class BaseModelOutputWithIntermediateActivations(ModelOutput): last_hidden_states: torch.FloatTensor = None intermediate_activations: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class BaseModelOutputWithPoolingAndIntermediateActivations(ModelOutput): last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None intermediate_activations: Optional[Tuple[torch.FloatTensor, ...]] = None class DPTViTHybridEmbeddings(nn.Module): def __init__(self, config, feature_size=None): super().__init__() (image_size, patch_size) = (config.image_size, config.patch_size) (num_channels, hidden_size) = (config.num_channels, config.hidden_size) image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) self.backbone = load_backbone(config) feature_dim = self.backbone.channels[-1] if len(self.backbone.channels) != 3: raise ValueError(f'Expected backbone to have 3 output features, got {len(self.backbone.channels)}') self.residual_feature_map_index = [0, 1] if feature_size is None: feat_map_shape = config.backbone_featmap_shape feature_size = feat_map_shape[-2:] feature_dim = feat_map_shape[1] else: feature_size = feature_size if isinstance(feature_size, collections.abc.Iterable) else (feature_size, feature_size) feature_dim = self.backbone.channels[-1] self.image_size = image_size self.patch_size = patch_size[0] self.num_channels = num_channels self.projection = nn.Conv2d(feature_dim, hidden_size, kernel_size=1) self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) def _resize_pos_embed(self, posemb, grid_size_height, grid_size_width, start_index=1): posemb_tok = posemb[:, :start_index] posemb_grid = posemb[0, start_index:] old_grid_size = torch_int(len(posemb_grid) ** 0.5) posemb_grid = posemb_grid.reshape(1, old_grid_size, old_grid_size, -1).permute(0, 3, 1, 2) posemb_grid = nn.functional.interpolate(posemb_grid, size=(grid_size_height, grid_size_width), mode='bilinear') posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, grid_size_height * grid_size_width, -1) posemb = torch.cat([posemb_tok, posemb_grid], dim=1) return posemb def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False, return_dict: bool=False) -> torch.Tensor: (batch_size, num_channels, height, width) = pixel_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).") position_embeddings = self._resize_pos_embed(self.position_embeddings, height // self.patch_size, width // self.patch_size) backbone_output = self.backbone(pixel_values) 
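# Descriptive note on this forward pass: the stored ViT position embeddings were resized just above by
# keeping the [CLS] slot untouched and bilinearly interpolating the patch-grid portion to the current
# height/width grid, so non-default input resolutions are supported. The BiT backbone call returns
# three feature maps; the deepest one is projected by the 1x1 convolution into patch tokens for the
# ViT encoder, while the first two (residual_feature_map_index) are passed through unchanged as
# intermediate activations for the DPT neck.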
features = backbone_output.feature_maps[-1] output_hidden_states = [backbone_output.feature_maps[index] for index in self.residual_feature_map_index] embeddings = self.projection(features).flatten(2).transpose(1, 2) cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) embeddings = embeddings + position_embeddings if not return_dict: return (embeddings, output_hidden_states) return BaseModelOutputWithIntermediateActivations(last_hidden_states=embeddings, intermediate_activations=output_hidden_states) class DPTViTEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.patch_embeddings = DPTViTPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def _resize_pos_embed(self, posemb, grid_size_height, grid_size_width, start_index=1): posemb_tok = posemb[:, :start_index] posemb_grid = posemb[0, start_index:] old_grid_size = torch_int(posemb_grid.size(0) ** 0.5) posemb_grid = posemb_grid.reshape(1, old_grid_size, old_grid_size, -1).permute(0, 3, 1, 2) posemb_grid = nn.functional.interpolate(posemb_grid, size=(grid_size_height, grid_size_width), mode='bilinear') posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, grid_size_height * grid_size_width, -1) posemb = torch.cat([posemb_tok, posemb_grid], dim=1) return posemb def forward(self, pixel_values, return_dict=False): (batch_size, num_channels, height, width) = pixel_values.shape patch_size = self.config.patch_size position_embeddings = self._resize_pos_embed(self.position_embeddings, height // patch_size, width // patch_size) embeddings = self.patch_embeddings(pixel_values) (batch_size, seq_len, _) = embeddings.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) embeddings = embeddings + position_embeddings embeddings = self.dropout(embeddings) if not return_dict: return (embeddings,) return BaseModelOutputWithIntermediateActivations(last_hidden_states=embeddings) class DPTViTPatchEmbeddings(nn.Module): def __init__(self, config): super().__init__() (image_size, patch_size) = (config.image_size, config.patch_size) (num_channels, hidden_size) = (config.num_channels, config.hidden_size) image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values): (batch_size, num_channels, height, width) = pixel_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings class DPTViTSelfAttention(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The 
hidden size {(config.hidden_size,)} is not a multiple of the number of attention heads {config.num_attention_heads}.') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class DPTViTSelfOutput(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class DPTViTAttention(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.attention = DPTViTSelfAttention(config) self.output = DPTViTSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, 
output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class DPTViTIntermediate(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class DPTViTOutput(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class DPTViTLayer(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = DPTViTAttention(config) self.intermediate = DPTViTIntermediate(config) self.output = DPTViTOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention(self.layernorm_before(hidden_states), head_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] hidden_states = attention_output + hidden_states layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs class DPTViTEncoder(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([DPTViTLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, layer_head_mask, output_attentions) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v 
is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class DPTReassembleStage(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layers = nn.ModuleList() if config.is_hybrid: self._init_reassemble_dpt_hybrid(config) else: self._init_reassemble_dpt(config) self.neck_ignore_stages = config.neck_ignore_stages def _init_reassemble_dpt_hybrid(self, config): for (i, factor) in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors): if i <= 1: self.layers.append(nn.Identity()) elif i > 1: self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor)) if config.readout_type != 'project': raise ValueError(f'Readout type {config.readout_type} is not supported for DPT-Hybrid.') self.readout_projects = nn.ModuleList() hidden_size = _get_backbone_hidden_size(config) for i in range(len(config.neck_hidden_sizes)): if i <= 1: self.readout_projects.append(nn.Sequential(nn.Identity())) elif i > 1: self.readout_projects.append(nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act])) def _init_reassemble_dpt(self, config): for (i, factor) in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors): self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor)) if config.readout_type == 'project': self.readout_projects = nn.ModuleList() hidden_size = _get_backbone_hidden_size(config) for _ in range(len(config.neck_hidden_sizes)): self.readout_projects.append(nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act])) def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]: out = [] for (i, hidden_state) in enumerate(hidden_states): if i not in self.neck_ignore_stages: (cls_token, hidden_state) = (hidden_state[:, 0], hidden_state[:, 1:]) (batch_size, sequence_length, num_channels) = hidden_state.shape if patch_height is not None and patch_width is not None: hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels) else: size = torch_int(sequence_length ** 0.5) hidden_state = hidden_state.reshape(batch_size, size, size, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_shape = hidden_state.shape if self.config.readout_type == 'project': hidden_state = hidden_state.flatten(2).permute((0, 2, 1)) readout = cls_token.unsqueeze(1).expand_as(hidden_state) hidden_state = self.readout_projects[i](torch.cat((hidden_state, readout), -1)) hidden_state = hidden_state.permute(0, 2, 1).reshape(feature_shape) elif self.config.readout_type == 'add': hidden_state = hidden_state.flatten(2) + cls_token.unsqueeze(-1) hidden_state = hidden_state.reshape(feature_shape) hidden_state = self.layers[i](hidden_state) out.append(hidden_state) return out def _get_backbone_hidden_size(config): if config.backbone_config is not None and config.is_hybrid is False: return config.backbone_config.hidden_size else: return config.hidden_size class DPTReassembleLayer(nn.Module): def __init__(self, config, channels, factor): super().__init__() hidden_size = _get_backbone_hidden_size(config) self.projection = nn.Conv2d(in_channels=hidden_size, out_channels=channels, kernel_size=1) if factor > 1: self.resize = nn.ConvTranspose2d(channels, channels, kernel_size=factor, stride=factor, padding=0) elif factor == 1: self.resize = nn.Identity() elif factor < 1: self.resize = 
nn.Conv2d(channels, channels, kernel_size=3, stride=int(1 / factor), padding=1) def forward(self, hidden_state): hidden_state = self.projection(hidden_state) hidden_state = self.resize(hidden_state) return hidden_state class DPTFeatureFusionStage(nn.Module): def __init__(self, config): super().__init__() self.layers = nn.ModuleList() for _ in range(len(config.neck_hidden_sizes)): self.layers.append(DPTFeatureFusionLayer(config)) def forward(self, hidden_states): hidden_states = hidden_states[::-1] fused_hidden_states = [] fused_hidden_state = self.layers[0](hidden_states[0]) fused_hidden_states.append(fused_hidden_state) for (hidden_state, layer) in zip(hidden_states[1:], self.layers[1:]): fused_hidden_state = layer(fused_hidden_state, hidden_state) fused_hidden_states.append(fused_hidden_state) return fused_hidden_states class DPTPreActResidualLayer(nn.Module): def __init__(self, config): super().__init__() self.use_batch_norm = config.use_batch_norm_in_fusion_residual use_bias_in_fusion_residual = config.use_bias_in_fusion_residual if config.use_bias_in_fusion_residual is not None else not self.use_batch_norm self.activation1 = nn.ReLU() self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual) self.activation2 = nn.ReLU() self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual) if self.use_batch_norm: self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size) self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: residual = hidden_state hidden_state = self.activation1(hidden_state) hidden_state = self.convolution1(hidden_state) if self.use_batch_norm: hidden_state = self.batch_norm1(hidden_state) hidden_state = self.activation2(hidden_state) hidden_state = self.convolution2(hidden_state) if self.use_batch_norm: hidden_state = self.batch_norm2(hidden_state) return hidden_state + residual class DPTFeatureFusionLayer(nn.Module): def __init__(self, config, align_corners=True): super().__init__() self.align_corners = align_corners self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True) self.residual_layer1 = DPTPreActResidualLayer(config) self.residual_layer2 = DPTPreActResidualLayer(config) def forward(self, hidden_state, residual=None): if residual is not None: if hidden_state.shape != residual.shape: residual = nn.functional.interpolate(residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode='bilinear', align_corners=False) hidden_state = hidden_state + self.residual_layer1(residual) hidden_state = self.residual_layer2(hidden_state) hidden_state = nn.functional.interpolate(hidden_state, scale_factor=2, mode='bilinear', align_corners=self.align_corners) hidden_state = self.projection(hidden_state) return hidden_state class DPTPreTrainedModel(PreTrainedModel): config_class = DPTConfig base_model_prefix = 'dpt' main_input_name = 'pixel_values' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) DPT_START_DOCSTRING = '\n This model is a PyTorch 
[torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ViTConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' DPT_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`DPTImageProcessor.__call__`]\n for details.\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare DPT Model transformer outputting raw hidden-states without any specific head on top.', DPT_START_DOCSTRING) class DPTModel(DPTPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config if config.is_hybrid: self.embeddings = DPTViTHybridEmbeddings(config) else: self.embeddings = DPTViTEmbeddings(config) self.encoder = DPTViTEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = DPTViTPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): if self.config.is_hybrid: return self.embeddings else: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(DPT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndIntermediateActivations, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPoolingAndIntermediateActivations]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values, return_dict=return_dict) embedding_last_hidden_states = embedding_output[0] if not return_dict else embedding_output.last_hidden_states encoder_outputs = 
self.encoder(embedding_last_hidden_states, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) return head_outputs + encoder_outputs[1:] + embedding_output[1:] return BaseModelOutputWithPoolingAndIntermediateActivations(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, intermediate_activations=embedding_output.intermediate_activations) class DPTViTPooler(nn.Module): def __init__(self, config: DPTConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class DPTNeck(nn.Module): def __init__(self, config): super().__init__() self.config = config if config.backbone_config is not None and config.backbone_config.model_type in ['swinv2']: self.reassemble_stage = None else: self.reassemble_stage = DPTReassembleStage(config) self.convs = nn.ModuleList() for channel in config.neck_hidden_sizes: self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False)) self.fusion_stage = DPTFeatureFusionStage(config) def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]: if not isinstance(hidden_states, (tuple, list)): raise TypeError('hidden_states should be a tuple or list of tensors') if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.') if self.reassemble_stage is not None: hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width) features = [self.convs[i](feature) for (i, feature) in enumerate(hidden_states)] output = self.fusion_stage(features) return output class DPTDepthEstimationHead(nn.Module): def __init__(self, config): super().__init__() self.config = config self.projection = None if config.add_projection: self.projection = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) features = config.fusion_hidden_size self.head = nn.Sequential(nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), nn.ReLU(), nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), nn.ReLU()) def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor: hidden_states = hidden_states[self.config.head_in_index] if self.projection is not None: hidden_states = self.projection(hidden_states) hidden_states = nn.ReLU()(hidden_states) predicted_depth = self.head(hidden_states) predicted_depth = predicted_depth.squeeze(dim=1) return predicted_depth @add_start_docstrings('\n DPT Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. 
for KITTI, NYUv2.\n ', DPT_START_DOCSTRING) class DPTForDepthEstimation(DPTPreTrainedModel): def __init__(self, config): super().__init__(config) self.backbone = None if config.is_hybrid is False and (config.backbone_config is not None or config.backbone is not None): self.backbone = load_backbone(config) else: self.dpt = DPTModel(config, add_pooling_layer=False) self.neck = DPTNeck(config) self.head = DPTDepthEstimationHead(config) self.post_init() @add_start_docstrings_to_model_forward(DPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]: loss = None if labels is not None: raise NotImplementedError('Training is not implemented yet') return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions if self.backbone is not None: outputs = self.backbone.forward_with_filtered_kwargs(pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions) hidden_states = outputs.feature_maps else: outputs = self.dpt(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict) hidden_states = outputs.hidden_states if return_dict else outputs[1] if not self.config.is_hybrid: hidden_states = [feature for (idx, feature) in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices] else: backbone_hidden_states = outputs.intermediate_activations if return_dict else list(outputs[-1]) backbone_hidden_states.extend((feature for (idx, feature) in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices[2:])) hidden_states = backbone_hidden_states (patch_height, patch_width) = (None, None) if self.config.backbone_config is not None and self.config.is_hybrid is False: (_, _, height, width) = pixel_values.shape patch_size = self.config.backbone_config.patch_size patch_height = height // patch_size patch_width = width // patch_size hidden_states = self.neck(hidden_states, patch_height, patch_width) predicted_depth = self.head(hidden_states) if not return_dict: if output_hidden_states: output = (predicted_depth,) + outputs[1:] else: output = (predicted_depth,) + outputs[2:] return (loss,) + output if loss is not None else output return DepthEstimatorOutput(loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions) class DPTSemanticSegmentationHead(nn.Module): def __init__(self, config): super().__init__() self.config = config features = config.fusion_hidden_size self.head = nn.Sequential(nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(features), nn.ReLU(), nn.Dropout(config.semantic_classifier_dropout), nn.Conv2d(features, config.num_labels, kernel_size=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)) def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor: hidden_states = hidden_states[self.config.head_in_index] logits = 
self.head(hidden_states) return logits class DPTAuxiliaryHead(nn.Module): def __init__(self, config): super().__init__() features = config.fusion_hidden_size self.head = nn.Sequential(nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(features), nn.ReLU(), nn.Dropout(0.1, False), nn.Conv2d(features, config.num_labels, kernel_size=1)) def forward(self, hidden_states): logits = self.head(hidden_states) return logits @add_start_docstrings('\n DPT Model with a semantic segmentation head on top e.g. for ADE20k, CityScapes.\n ', DPT_START_DOCSTRING) class DPTForSemanticSegmentation(DPTPreTrainedModel): def __init__(self, config): super().__init__(config) self.dpt = DPTModel(config, add_pooling_layer=False) self.neck = DPTNeck(config) self.head = DPTSemanticSegmentationHead(config) self.auxiliary_head = DPTAuxiliaryHead(config) if config.use_auxiliary_head else None self.post_init() @add_start_docstrings_to_model_forward(DPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SemanticSegmenterOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states if labels is not None and self.config.num_labels == 1: raise ValueError('The number of labels should be greater than one') outputs = self.dpt(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict) hidden_states = outputs.hidden_states if return_dict else outputs[1] if not self.config.is_hybrid: hidden_states = [feature for (idx, feature) in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices] else: backbone_hidden_states = outputs.intermediate_activations if return_dict else list(outputs[-1]) backbone_hidden_states.extend((feature for (idx, feature) in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices[2:])) hidden_states = backbone_hidden_states hidden_states = self.neck(hidden_states=hidden_states) logits = self.head(hidden_states) auxiliary_logits = None if self.auxiliary_head is not None: auxiliary_logits = self.auxiliary_head(hidden_states[-1]) loss = None if labels is not None: upsampled_logits = nn.functional.interpolate(logits, size=labels.shape[-2:], mode='bilinear', align_corners=False) if auxiliary_logits is not None: upsampled_auxiliary_logits = nn.functional.interpolate(auxiliary_logits, size=labels.shape[-2:], mode='bilinear', align_corners=False) loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) main_loss = loss_fct(upsampled_logits, labels) auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels) loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions) # File: 
transformers-main/src/transformers/models/efficientnet/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_efficientnet': ['EfficientNetConfig', 'EfficientNetOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_efficientnet'] = ['EfficientNetForImageClassification', 'EfficientNetModel', 'EfficientNetPreTrainedModel'] if TYPE_CHECKING: from .configuration_efficientnet import EfficientNetConfig, EfficientNetOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) # File: transformers-main/src/transformers/models/efficientnet/configuration_efficientnet.py """""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class EfficientNetConfig(PretrainedConfig): model_type = 'efficientnet' def __init__(self, num_channels: int=3, image_size: int=600, width_coefficient: float=2.0, depth_coefficient: float=3.1, depth_divisor: int=8, kernel_sizes: List[int]=[3, 3, 5, 3, 5, 5, 3], in_channels: List[int]=[32, 16, 24, 40, 80, 112, 192], out_channels: List[int]=[16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int]=[], strides: List[int]=[1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int]=[1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int]=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float=0.25, hidden_act: str='swish', hidden_dim: int=2560, pooling_type: str='mean', initializer_range: float=0.02, batch_norm_eps: float=0.001, batch_norm_momentum: float=0.99, dropout_rate: float=0.5, drop_connect_rate: float=0.2, **kwargs): super().__init__(**kwargs) self.num_channels = num_channels self.image_size = image_size self.width_coefficient = width_coefficient self.depth_coefficient = depth_coefficient self.depth_divisor = depth_divisor self.kernel_sizes = kernel_sizes self.in_channels = in_channels self.out_channels = out_channels self.depthwise_padding = depthwise_padding self.strides = strides self.num_block_repeats = num_block_repeats self.expand_ratios = expand_ratios self.squeeze_expansion_ratio = squeeze_expansion_ratio self.hidden_act = hidden_act self.hidden_dim = hidden_dim self.pooling_type = pooling_type self.initializer_range = initializer_range self.batch_norm_eps = batch_norm_eps self.batch_norm_momentum = batch_norm_momentum self.dropout_rate = dropout_rate self.drop_connect_rate = drop_connect_rate self.num_hidden_layers = sum(num_block_repeats) * 4 class EfficientNetOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse('1.11') @property def inputs(self) 
-> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})]) @property def atol_for_validation(self) -> float: return 1e-05 # File: transformers-main/src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py """""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) model_classes = {'b0': efficientnet.EfficientNetB0, 'b1': efficientnet.EfficientNetB1, 'b2': efficientnet.EfficientNetB2, 'b3': efficientnet.EfficientNetB3, 'b4': efficientnet.EfficientNetB4, 'b5': efficientnet.EfficientNetB5, 'b6': efficientnet.EfficientNetB6, 'b7': efficientnet.EfficientNetB7} CONFIG_MAP = {'b0': {'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': []}, 'b1': {'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16]}, 'b2': {'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16]}, 'b3': {'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18]}, 'b4': {'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6]}, 'b5': {'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27]}, 'b6': {'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31]}, 'b7': {'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18]}} def get_efficientnet_config(model_name): config = EfficientNetConfig() config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim'] config.width_coefficient = CONFIG_MAP[model_name]['width_coef'] config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef'] config.image_size = CONFIG_MAP[model_name]['image_size'] config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate'] config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding'] repo_id = 'huggingface/label-files' filename = 'imagenet-1k-id2label.json' config.num_labels = 1000 id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} config.id2label = id2label config.label2id = {v: k for (k, v) in id2label.items()} return config def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im def convert_image_processor(model_name): size = CONFIG_MAP[model_name]['image_size'] preprocessor = EfficientNetImageProcessor(size={'height': size, 'width': size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False) return preprocessor def rename_keys(original_param_names): block_names = [v.split('_')[0].split('block')[1] for v in original_param_names if v.startswith('block')] block_names = sorted(set(block_names)) num_blocks = len(block_names) 
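# Illustrative note (added sketch, not part of the upstream converter): the TF Keras
# EfficientNet weights are named per block tag, e.g. 'block1a_expand_conv',
# 'block2b_dwconv', 'block3a_se_reduce', ... Splitting on '_' and stripping the
# 'block' prefix leaves tags such as '1a', '2a', '2b', which are sorted and mapped
# to consecutive HF block indices below, roughly:
#   {'1a': '0', '2a': '1', '2b': '2', '3a': '3', ...}
# so a TF key like 'block2b_expand_conv/kernel:0' is renamed to
# 'encoder.blocks.2.expansion.expand_conv.weight' (and later prefixed with
# 'efficientnet.' when building `key_mapping`).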
block_name_mapping = {b: str(i) for (b, i) in zip(block_names, range(num_blocks))} rename_keys = [] rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight')) rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight')) rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias')) rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean')) rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var')) for b in block_names: hf_b = block_name_mapping[b] rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight')) rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight')) rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias')) rename_keys.append((f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean')) rename_keys.append((f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var')) rename_keys.append((f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight')) rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight')) rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias')) rename_keys.append((f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean')) rename_keys.append((f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var')) rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight')) rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias')) rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight')) rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias')) rename_keys.append((f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight')) rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight')) rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias')) rename_keys.append((f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean')) rename_keys.append((f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var')) rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight')) rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight')) rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias')) rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean')) rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var')) key_mapping = {} for item in rename_keys: if item[0] in original_param_names: key_mapping[item[0]] = 'efficientnet.' 
+ item[1] key_mapping['predictions/kernel:0'] = 'classifier.weight' key_mapping['predictions/bias:0'] = 'classifier.bias' return key_mapping def replace_params(hf_params, tf_params, key_mapping): for (key, value) in tf_params.items(): if 'normalization' in key: continue hf_key = key_mapping[key] if '_conv' in key and 'kernel' in key: new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1) elif 'depthwise_kernel' in key: new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1) elif 'kernel' in key: new_hf_value = torch.from_numpy(np.transpose(value)) else: new_hf_value = torch.from_numpy(value) assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(new_hf_value) @torch.no_grad() def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub): original_model = model_classes[model_name](include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax') tf_params = original_model.trainable_variables tf_non_train_params = original_model.non_trainable_variables tf_params = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: tf_params[param.name] = param.numpy() tf_param_names = list(tf_params.keys()) config = get_efficientnet_config(model_name) hf_model = EfficientNetForImageClassification(config).eval() hf_params = hf_model.state_dict() print('Converting parameters...') key_mapping = rename_keys(tf_param_names) replace_params(hf_params, tf_params, key_mapping) preprocessor = convert_image_processor(model_name) inputs = preprocessor(images=prepare_img(), return_tensors='pt') hf_model.eval() with torch.no_grad(): outputs = hf_model(**inputs) hf_logits = outputs.logits.detach().numpy() original_model.trainable = False image_size = CONFIG_MAP[model_name]['image_size'] img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) original_logits = original_model.predict(x) assert np.allclose(original_logits, hf_logits, atol=0.001), 'The predicted logits are not the same.' 
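# Optional extra sanity check (a hedged sketch, assuming the logits above are
# ImageNet-1k class scores of shape (1, 1000); not part of the original script):
# besides the element-wise tolerance check, one could also verify that both models
# agree on the predicted class, e.g.
#   hf_top1 = config.id2label[int(hf_logits[0].argmax())]
#   tf_top1 = config.id2label[int(original_logits[0].argmax())]
#   print(f'HF top-1: {hf_top1} | TF top-1: {tf_top1}')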
print('Model outputs match!') if save_model: if not os.path.isdir(pytorch_dump_folder_path): os.mkdir(pytorch_dump_folder_path) hf_model.save_pretrained(pytorch_dump_folder_path) preprocessor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f'Pushing converted {model_name} to the hub...') model_name = f'efficientnet-{model_name}' preprocessor.push_to_hub(model_name) hf_model.push_to_hub(model_name) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].') parser.add_argument('--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') args = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub) # File: transformers-main/src/transformers/models/efficientnet/image_processing_efficientnet.py """""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) class EfficientNetImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PIL.Image.NEAREST, do_center_crop: bool=False, crop_size: Dict[str, int]=None, rescale_factor: Union[int, float]=1 / 255, rescale_offset: bool=False, do_rescale: bool=True, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, include_top: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 346, 'width': 346} size = get_size_dict(size) crop_size = crop_size if crop_size is not None else {'height': 289, 'width': 289} crop_size = get_size_dict(crop_size, param_name='crop_size') self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.rescale_offset = rescale_offset self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.include_top = include_top def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.NEAREST, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size) if 'height' not in size or 'width' not in size: raise ValueError(f'The `size` dictionary must contain the keys `height` and 
`width`. Got {size.keys()}') output_size = (size['height'], size['width']) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool=True, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs): rescaled_image = rescale(image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs) if offset: rescaled_image = rescaled_image - 1 return rescaled_image @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample=None, do_center_crop: bool=None, crop_size: Dict[str, int]=None, do_rescale: bool=None, rescale_factor: float=None, rescale_offset: bool=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, include_top: bool=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor rescale_offset = rescale_offset if rescale_offset is not None else self.rescale_offset do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std include_top = include_top if include_top is not None else self.include_top size = size if size is not None else self.size size = get_size_dict(size) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name='crop_size') images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images] if do_center_crop: images = [self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, offset=rescale_offset, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] if include_top: images = [self.normalize(image=image, mean=0, std=image_std, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors) # File: transformers-main/src/transformers/models/efficientnet/modeling_efficientnet.py """""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_efficientnet import EfficientNetConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'EfficientNetConfig' _CHECKPOINT_FOR_DOC = 'google/efficientnet-b7' _EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7] _IMAGE_CLASS_CHECKPOINT = 'google/efficientnet-b7' _IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat' EFFICIENTNET_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`EfficientNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' EFFICIENTNET_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`AutoImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' def round_filters(config: EfficientNetConfig, num_channels: int): divisor = config.depth_divisor num_channels *= config.width_coefficient new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor) if new_dim < 0.9 * num_channels: new_dim += divisor return int(new_dim) def correct_pad(kernel_size: Union[int, Tuple], adjust: bool=True): if isinstance(kernel_size, int): kernel_size = (kernel_size, kernel_size) correct = (kernel_size[0] // 2, kernel_size[1] // 2) if adjust: return (correct[1] - 1, correct[1], correct[0] - 1, correct[0]) else: return (correct[1], correct[1], correct[0], correct[0]) class EfficientNetEmbeddings(nn.Module): def __init__(self, config: EfficientNetConfig): super().__init__() self.out_dim = round_filters(config, 32) self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1)) self.convolution = nn.Conv2d(config.num_channels, self.out_dim, kernel_size=3, stride=2, padding='valid', bias=False) self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) self.activation = ACT2FN[config.hidden_act] def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: features = self.padding(pixel_values) features = self.convolution(features) features = self.batchnorm(features) features = self.activation(features) return features class EfficientNetDepthwiseConv2d(nn.Conv2d): def __init__(self, in_channels, depth_multiplier=1, kernel_size=3, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'): out_channels = in_channels * depth_multiplier super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias, padding_mode=padding_mode) class EfficientNetExpansionLayer(nn.Module): def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int): super().__init__() self.expand_conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding='same', bias=False) self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps) self.expand_act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: hidden_states = self.expand_conv(hidden_states) hidden_states = self.expand_bn(hidden_states) hidden_states = self.expand_act(hidden_states) return hidden_states class EfficientNetDepthwiseLayer(nn.Module): def __init__(self, config: EfficientNetConfig, in_dim: int, stride: int, kernel_size: int, adjust_padding: bool): super().__init__() self.stride = stride conv_pad = 'valid' if self.stride == 2 else 'same' padding = correct_pad(kernel_size, adjust=adjust_padding) self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding) self.depthwise_conv = EfficientNetDepthwiseConv2d(in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False) self.depthwise_norm = nn.BatchNorm2d(num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) self.depthwise_act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: if self.stride == 2: hidden_states = self.depthwise_conv_pad(hidden_states) hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.depthwise_norm(hidden_states) hidden_states = self.depthwise_act(hidden_states) return hidden_states class 
EfficientNetSqueezeExciteLayer(nn.Module): def __init__(self, config: EfficientNetConfig, in_dim: int, expand_dim: int, expand: bool=False): super().__init__() self.dim = expand_dim if expand else in_dim self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio)) self.squeeze = nn.AdaptiveAvgPool2d(output_size=1) self.reduce = nn.Conv2d(in_channels=self.dim, out_channels=self.dim_se, kernel_size=1, padding='same') self.expand = nn.Conv2d(in_channels=self.dim_se, out_channels=self.dim, kernel_size=1, padding='same') self.act_reduce = ACT2FN[config.hidden_act] self.act_expand = nn.Sigmoid() def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: inputs = hidden_states hidden_states = self.squeeze(hidden_states) hidden_states = self.reduce(hidden_states) hidden_states = self.act_reduce(hidden_states) hidden_states = self.expand(hidden_states) hidden_states = self.act_expand(hidden_states) hidden_states = torch.mul(inputs, hidden_states) return hidden_states class EfficientNetFinalBlockLayer(nn.Module): def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool): super().__init__() self.apply_dropout = stride == 1 and (not id_skip) self.project_conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding='same', bias=False) self.project_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) self.dropout = nn.Dropout(p=drop_rate) def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor: hidden_states = self.project_conv(hidden_states) hidden_states = self.project_bn(hidden_states) if self.apply_dropout: hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + embeddings return hidden_states class EfficientNetBlock(nn.Module): def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, expand_ratio: int, kernel_size: int, drop_rate: float, id_skip: bool, adjust_padding: bool): super().__init__() self.expand_ratio = expand_ratio self.expand = True if self.expand_ratio != 1 else False expand_in_dim = in_dim * expand_ratio if self.expand: self.expansion = EfficientNetExpansionLayer(config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride) self.depthwise_conv = EfficientNetDepthwiseLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, stride=stride, kernel_size=kernel_size, adjust_padding=adjust_padding) self.squeeze_excite = EfficientNetSqueezeExciteLayer(config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand) self.projection = EfficientNetFinalBlockLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, out_dim=out_dim, stride=stride, drop_rate=drop_rate, id_skip=id_skip) def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: embeddings = hidden_states if self.expand_ratio != 1: hidden_states = self.expansion(hidden_states) hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.squeeze_excite(hidden_states) hidden_states = self.projection(embeddings, hidden_states) return hidden_states class EfficientNetEncoder(nn.Module): def __init__(self, config: EfficientNetConfig): super().__init__() self.config = config self.depth_coefficient = config.depth_coefficient def round_repeats(repeats): return int(math.ceil(self.depth_coefficient * repeats)) num_base_blocks = len(config.in_channels) num_blocks = sum((round_repeats(n) for n in config.num_block_repeats)) curr_block_num = 0 
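# Descriptive note (added, hedged): the loop below expands the per-stage settings into
# individual EfficientNetBlock modules. Each of the 7 base stages is repeated
# round_repeats(n) = ceil(depth_coefficient * n) times; only the first repeat of a
# stage keeps its configured stride and input width (subsequent repeats use stride 1
# and in_dim == out_dim), and the dropout probability derived from drop_connect_rate
# grows linearly with the running block index. For b0-like settings
# (depth_coefficient = 1.0) the base repeats [1, 2, 2, 3, 3, 4, 1] give 16 blocks in total.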
blocks = [] for i in range(num_base_blocks): in_dim = round_filters(config, config.in_channels[i]) out_dim = round_filters(config, config.out_channels[i]) stride = config.strides[i] kernel_size = config.kernel_sizes[i] expand_ratio = config.expand_ratios[i] for j in range(round_repeats(config.num_block_repeats[i])): id_skip = True if j == 0 else False stride = 1 if j > 0 else stride in_dim = out_dim if j > 0 else in_dim adjust_padding = False if curr_block_num in config.depthwise_padding else True drop_rate = config.drop_connect_rate * curr_block_num / num_blocks block = EfficientNetBlock(config=config, in_dim=in_dim, out_dim=out_dim, stride=stride, kernel_size=kernel_size, expand_ratio=expand_ratio, drop_rate=drop_rate, id_skip=id_skip, adjust_padding=adjust_padding) blocks.append(block) curr_block_num += 1 self.blocks = nn.ModuleList(blocks) self.top_conv = nn.Conv2d(in_channels=out_dim, out_channels=round_filters(config, 1280), kernel_size=1, padding='same', bias=False) self.top_bn = nn.BatchNorm2d(num_features=config.hidden_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) self.top_activation = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> BaseModelOutputWithNoAttention: all_hidden_states = (hidden_states,) if output_hidden_states else None for block in self.blocks: hidden_states = block(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) hidden_states = self.top_conv(hidden_states) hidden_states = self.top_bn(hidden_states) hidden_states = self.top_activation(hidden_states) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states] if v is not None)) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states) class EfficientNetPreTrainedModel(PreTrainedModel): config_class = EfficientNetConfig base_model_prefix = 'efficientnet' main_input_name = 'pixel_values' _no_split_modules = [] def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @add_start_docstrings('The bare EfficientNet model outputting raw features without any specific head on top.', EFFICIENTNET_START_DOCSTRING) class EfficientNetModel(EfficientNetPreTrainedModel): def __init__(self, config: EfficientNetConfig): super().__init__(config) self.config = config self.embeddings = EfficientNetEmbeddings(config) self.encoder = EfficientNetEncoder(config) if config.pooling_type == 'mean': self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True) elif config.pooling_type == 'max': self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True) else: raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}") self.post_init() @add_start_docstrings_to_model_forward(EFFICIENTNET_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: torch.FloatTensor=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = output_hidden_states if 
output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) pooled_output = pooled_output.reshape(pooled_output.shape[:2]) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states) @add_start_docstrings('\n EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g.\n for ImageNet.\n ', EFFICIENTNET_START_DOCSTRING) class EfficientNetForImageClassification(EfficientNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.efficientnet = EfficientNetModel(config) self.dropout = nn.Dropout(p=config.dropout_rate) self.classifier = nn.Linear(config.hidden_dim, self.num_labels) if self.num_labels > 0 else nn.Identity() self.post_init() @add_start_docstrings_to_model_forward(EFFICIENTNET_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: torch.FloatTensor=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.efficientnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) # File: transformers-main/src/transformers/models/electra/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_electra': 
['ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_electra'] = ['ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_electra'] = ['TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_electra'] = ['FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel'] if TYPE_CHECKING: from .configuration_electra import ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/electra/configuration_electra.py """""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class ElectraConfig(PretrainedConfig): model_type = 
'electra' def __init__(self, vocab_size=30522, embedding_size=128, hidden_size=256, num_hidden_layers=12, num_attention_heads=4, intermediate_size=1024, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, summary_type='first', summary_use_proj=True, summary_activation='gelu', summary_last_dropout=0.1, pad_token_id=0, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_last_dropout = summary_last_dropout self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout class ElectraOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)]) # File: transformers-main/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py """""" import argparse import torch from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, discriminator_or_generator): config = ElectraConfig.from_json_file(config_file) print(f'Building PyTorch model from configuration: {config}') if discriminator_or_generator == 'discriminator': model = ElectraForPreTraining(config) elif discriminator_or_generator == 'generator': model = ElectraForMaskedLM(config) else: raise ValueError("The discriminator_or_generator argument should be either 'discriminator' or 'generator'") load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator=discriminator_or_generator) print(f'Save PyTorch model to {pytorch_dump_path}') torch.save(model.state_dict(), pytorch_dump_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.') parser.add_argument('--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.') parser.add_argument('--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') parser.add_argument('--discriminator_or_generator', default=None, type=str, required=True, help="Whether to export the generator or the discriminator. 
Should be a string, either 'discriminator' or 'generator'.") args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.discriminator_or_generator) # File: transformers-main/src/transformers/models/electra/modeling_electra.py """""" import math import os from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, get_activation from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel, SequenceSummary from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_electra import ElectraConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google/electra-small-discriminator' _CONFIG_FOR_DOC = 'ElectraConfig' def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator='discriminator'): try: import re import numpy as np import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}') array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for (name, array) in zip(names, arrays): original_name: str = name try: if isinstance(model, ElectraForMaskedLM): name = name.replace('electra/embeddings/', 'generator/embeddings/') if discriminator_or_generator == 'generator': name = name.replace('electra/', 'discriminator/') name = name.replace('generator/', 'electra/') name = name.replace('dense_1', 'dense_prediction') name = name.replace('generator_predictions/output_bias', 'generator_lm_head/bias') name = name.split('/') if any((n in ['global_step', 'temperature'] for n in name)): logger.info(f'Skipping {original_name}') continue pointer = model for m_name in name: if re.fullmatch('[A-Za-z]+_\\d+', m_name): scope_names = re.split('_(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'kernel' or scope_names[0] == 'gamma': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta': pointer = getattr(pointer, 'bias') elif scope_names[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'squad': pointer = getattr(pointer, 'classifier') else: pointer = getattr(pointer, scope_names[0]) if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name.endswith('_embeddings'): pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} 
mismatched') except ValueError as e: e.args += (pointer.shape, array.shape) raise print(f'Initialize PyTorch weight {name}', original_name) pointer.data = torch.from_numpy(array) except AttributeError as e: print(f'Skipping {original_name}', name, e) continue return model class ElectraEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class ElectraSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * 
config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() 
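# After the permute above, the context layer has shape (batch, seq_len, num_heads, head_dim);
# the view below merges the last two dimensions into all_head_size so the tensor can feed the
# output projection in ElectraSelfOutput.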
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class ElectraSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states ELECTRA_SELF_ATTENTION_CLASSES = {'eager': ElectraSelfAttention} class ElectraAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = ELECTRA_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = ElectraSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class ElectraIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class ElectraOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class 
ElectraLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ElectraAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = ElectraAttention(config, position_embedding_type='absolute') self.intermediate = ElectraIntermediate(config) self.output = ElectraOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class ElectraEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ElectraLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None 
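# Cross-attention weights are only collected when the config adds cross-attention layers,
# i.e. when this encoder stack is used as a decoder attending over encoder hidden states.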
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class ElectraDiscriminatorPredictions(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = get_activation(config.hidden_act) self.dense_prediction = nn.Linear(config.hidden_size, 1) self.config = config def forward(self, discriminator_hidden_states): hidden_states = self.dense(discriminator_hidden_states) hidden_states = self.activation(hidden_states) logits = self.dense_prediction(hidden_states).squeeze(-1) return logits class ElectraGeneratorPredictions(nn.Module): def __init__(self, config): super().__init__() self.activation = get_activation('gelu') self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) self.dense = nn.Linear(config.hidden_size, config.embedding_size) def forward(self, generator_hidden_states): hidden_states = self.dense(generator_hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class ElectraPreTrainedModel(PreTrainedModel): config_class = ElectraConfig load_tf_weights = load_tf_weights_in_electra base_model_prefix = 'electra' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class ElectraForPreTrainingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: 
torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ELECTRA_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`ElectraConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' ELECTRA_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n encoder_hidden_states (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the hidden size and embedding size are different. Both the generator and discriminator checkpoints may be loaded into this model.', ELECTRA_START_DOCSTRING) class ElectraModel(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = ElectraEmbeddings(config) if config.embedding_size != config.hidden_size: self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size) self.encoder = ElectraEncoder(config) self.config = config self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = 
buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) if hasattr(self, 'embeddings_project'): hidden_states = self.embeddings_project(hidden_states) hidden_states = self.encoder(hidden_states, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return hidden_states class ElectraClassificationHead(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.activation = get_activation('gelu') self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = self.activation(x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings('\n ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. 
for GLUE tasks.\n ', ELECTRA_START_DOCSTRING) class ElectraForSequenceClassification(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.electra = ElectraModel(config) self.classifier = ElectraClassificationHead(config) self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='bhadresh-savani/electra-base-emotion', output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'joy'", expected_loss=0.06) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict discriminator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = discriminator_hidden_states[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + discriminator_hidden_states[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions) @add_start_docstrings('\n Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.\n\n It is recommended to load the discriminator checkpoint into that model.\n ', ELECTRA_START_DOCSTRING) class ElectraForPreTraining(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.electra = ElectraModel(config) self.discriminator_predictions = ElectraDiscriminatorPredictions(config) self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=ElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: 
Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], ElectraForPreTrainingOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict discriminator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) discriminator_sequence_output = discriminator_hidden_states[0] logits = self.discriminator_predictions(discriminator_sequence_output) loss = None if labels is not None: loss_fct = nn.BCEWithLogitsLoss() if attention_mask is not None: active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1 active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss] active_labels = labels[active_loss] loss = loss_fct(active_logits, active_labels.float()) else: loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float()) if not return_dict: output = (logits,) + discriminator_hidden_states[1:] return (loss,) + output if loss is not None else output return ElectraForPreTrainingOutput(loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions) @add_start_docstrings('\n Electra model with a language modeling head on top.\n\n Even though both the discriminator and generator may be loaded into this model, the generator is the only model of\n the two to have been trained for the masked language modeling task.\n ', ELECTRA_START_DOCSTRING) class ElectraForMaskedLM(ElectraPreTrainedModel): _tied_weights_keys = ['generator_lm_head.weight'] def __init__(self, config): super().__init__(config) self.electra = ElectraModel(config) self.generator_predictions = ElectraGeneratorPredictions(config) self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size) self.post_init() def get_output_embeddings(self): return self.generator_lm_head def set_output_embeddings(self, word_embeddings): self.generator_lm_head = word_embeddings @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='google/electra-small-generator', output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='[MASK]', expected_output="'paris'", expected_loss=1.22) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict generator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) 
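# The generator head first maps hidden states to embedding_size (dense + GELU + LayerNorm in
# ElectraGeneratorPredictions) and then projects to the vocabulary with generator_lm_head,
# which is declared in _tied_weights_keys so it can be tied to the input embeddings.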
generator_sequence_output = generator_hidden_states[0] prediction_scores = self.generator_predictions(generator_sequence_output) prediction_scores = self.generator_lm_head(prediction_scores) loss = None if labels is not None: loss_fct = nn.CrossEntropyLoss() loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + generator_hidden_states[1:] return (loss,) + output if loss is not None else output return MaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions) @add_start_docstrings('\n Electra model with a token classification head on top.\n\n Both the discriminator and generator may be loaded into this model.\n ', ELECTRA_START_DOCSTRING) class ElectraForTokenClassification(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.electra = ElectraModel(config) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='bhadresh-savani/electra-base-discriminator-finetuned-conll03-english', output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="['B-LOC', 'B-ORG', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'O', 'B-LOC', 'I-LOC']", expected_loss=0.11) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict discriminator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) discriminator_sequence_output = discriminator_hidden_states[0] discriminator_sequence_output = self.dropout(discriminator_sequence_output) logits = self.classifier(discriminator_sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + discriminator_hidden_states[1:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions) @add_start_docstrings('\n ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', ELECTRA_START_DOCSTRING) class ElectraForQuestionAnswering(ElectraPreTrainedModel): config_class = ElectraConfig base_model_prefix = 'electra' def __init__(self, config): 
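# Span-extraction head: qa_outputs maps each token's hidden state to two logits (answer start
# and answer end), which forward() splits along the last dimension into start/end logits.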
super().__init__(config) self.num_labels = config.num_labels self.electra = ElectraModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='bhadresh-savani/electra-base-squad2', output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=11, qa_target_end_index=12, expected_output="'a nice puppet'", expected_loss=2.64) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict discriminator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states) sequence_output = discriminator_hidden_states[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + discriminator_hidden_states[1:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions) @add_start_docstrings('\n ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', ELECTRA_START_DOCSTRING) class ElectraForMultipleChoice(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.electra = ElectraModel(config) self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None discriminator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = discriminator_hidden_states[0] pooled_output = self.sequence_summary(sequence_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + discriminator_hidden_states[1:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions) @add_start_docstrings('ELECTRA Model with a `language modeling` head on top for CLM fine-tuning.', ELECTRA_START_DOCSTRING) class ElectraForCausalLM(ElectraPreTrainedModel): _tied_weights_keys = ['generator_lm_head.weight'] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning('If you want to use `ElectraForCausalLM` as a standalone, add `is_decoder=True.`') self.electra = ElectraModel(config) self.generator_predictions = ElectraGeneratorPredictions(config) self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size) self.init_weights() def get_output_embeddings(self): return self.generator_lm_head def set_output_embeddings(self, new_embeddings): self.generator_lm_head = new_embeddings @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) 
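# In the forward pass below, prediction scores and labels are shifted by one position so that
# each token is trained to predict the next token (standard causal language-modeling loss).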
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.Tensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.generator_lm_head(self.generator_predictions(sequence_output)) lm_loss = None if labels is not None: shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/electra/modeling_flax_electra.py from typing import Callable, Optional, Tuple import flax import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, 
FlaxCausalLMOutputWithCrossAttentions, FlaxMaskedLMOutput, FlaxMultipleChoiceModelOutput, FlaxQuestionAnsweringModelOutput, FlaxSequenceClassifierOutput, FlaxTokenClassifierOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_electra import ElectraConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google/electra-small-discriminator' _CONFIG_FOR_DOC = 'ElectraConfig' remat = nn_partitioning.remat @flax.struct.dataclass class FlaxElectraForPreTrainingOutput(ModelOutput): logits: jnp.ndarray = None hidden_states: Optional[Tuple[jnp.ndarray]] = None attentions: Optional[Tuple[jnp.ndarray]] = None ELECTRA_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading, saving and converting weights from PyTorch models)\n\n This model is also a Flax Linen\n [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a\n regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`ElectraConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' ELECTRA_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`numpy.ndarray` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n head_mask (`numpy.ndarray` of shape `({0})`, `optional):\n Mask to nullify selected heads of the attention modules. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\n' class FlaxElectraEmbeddings(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.word_embeddings = nn.Embed(self.config.vocab_size, self.config.embedding_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.position_embeddings = nn.Embed(self.config.max_position_embeddings, self.config.embedding_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.token_type_embeddings = nn.Embed(self.config.type_vocab_size, self.config.embedding_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool=True): inputs_embeds = self.word_embeddings(input_ids.astype('i4')) position_embeds = self.position_embeddings(position_ids.astype('i4')) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype('i4')) hidden_states = inputs_embeds + token_type_embeddings + position_embeds hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxElectraSelfAttention(nn.Module): config: ElectraConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.head_dim = self.config.hidden_size // self.config.num_attention_heads if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError('`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` : {self.config.num_attention_heads}') self.query = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.key = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) self.value = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) if self.causal: self.causal_mask = make_causal_mask(jnp.ones((1, self.config.max_position_embeddings), dtype='bool'), dtype='bool') def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') cached_key = self.variable('cache', 'cached_key', jnp.zeros, key.shape, key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape, value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, dtype=jnp.int32)) if is_initialized: (*batch_dims, max_length, num_heads, depth_per_head) = cached_key.value.shape cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value 
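# Autoregressive cache update: advance cache_index by the number of newly written key/value
# vectors and build a padding mask so queries cannot attend to cache slots that have not been
# filled yet.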
num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors pad_mask = jnp.broadcast_to(jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) def __call__(self, hidden_states, attention_mask, layer_head_mask, key_value_states: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic=True, output_attentions: bool=False): is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] query_states = self.query(hidden_states) if is_cross_attention: key_states = self.key(key_value_states) value_states = self.value(key_value_states) else: key_states = self.key(hidden_states) value_states = self.value(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) if self.causal: (query_length, key_length) = (query_states.shape[1], key_states.shape[1]) if self.has_variable('cache', 'cached_key'): mask_shift = self.variables['cache']['cache_index'] max_decoder_length = self.variables['cache']['cached_key'].shape[1] causal_mask = lax.dynamic_slice(self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) if self.causal and (self.has_variable('cache', 'cached_key') or init_cache): (key_states, value_states, attention_mask) = self._concatenate_to_cache(key_states, value_states, query_states, attention_mask) if attention_mask is not None: attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng('dropout') attn_weights = dot_product_attention_weights(query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None) if layer_head_mask is not None: attn_weights = jnp.einsum('...hqk,h->...hqk', attn_weights, layer_head_mask) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxElectraSelfOutput(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool=True): hidden_states = 
self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class FlaxElectraAttention(nn.Module): config: ElectraConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.self = FlaxElectraSelfAttention(self.config, causal=self.causal, dtype=self.dtype) self.output = FlaxElectraSelfOutput(self.config, dtype=self.dtype) def __call__(self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool=False): attn_outputs = self.self(hidden_states, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs class FlaxElectraIntermediate(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxElectraOutput(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_output, deterministic: bool=True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + attention_output) return hidden_states class FlaxElectraLayer(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxElectraAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype) self.intermediate = FlaxElectraIntermediate(self.config, dtype=self.dtype) self.output = FlaxElectraOutput(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = FlaxElectraAttention(self.config, causal=False, dtype=self.dtype) def __call__(self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False): attention_outputs = self.attention(hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions) attention_output = attention_outputs[0] if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention(attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions) attention_output = cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, 
deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs class FlaxElectraLayerCollection(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxElectraCheckpointLayer = remat(FlaxElectraLayer, static_argnums=(5, 6, 7)) self.layers = [FlaxElectraCheckpointLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] else: self.layers = [FlaxElectraLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] def __call__(self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None if head_mask is not None: if head_mask.shape[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.shape[0]}.') for (i, layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer(hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, encoder_hidden_states, encoder_attention_mask, init_cache, deterministic, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) class FlaxElectraEncoder(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.layer = FlaxElectraLayerCollection(self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) def __call__(self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): return self.layer(hidden_states, attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class FlaxElectraGeneratorPredictions(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dense = nn.Dense(self.config.embedding_size, dtype=self.dtype) def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) 
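# Generator prediction head: the dense layer above projects encoder hidden states to
# `embedding_size`; the activation and LayerNorm that follow prepare them for the
# (optionally weight-tied) language-modeling head used by the masked-LM and causal-LM models.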
hidden_states = ACT2FN[self.config.hidden_act](hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class FlaxElectraDiscriminatorPredictions(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype) self.dense_prediction = nn.Dense(1, dtype=self.dtype) def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = ACT2FN[self.config.hidden_act](hidden_states) hidden_states = self.dense_prediction(hidden_states).squeeze(-1) return hidden_states class FlaxElectraPreTrainedModel(FlaxPreTrainedModel): config_class = ElectraConfig base_model_prefix = 'electra' module_class: nn.Module = None def __init__(self, config: ElectraConfig, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, gradient_checkpointing: bool=False, **kwargs): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def enable_gradient_checkpointing(self): self._module = self.module_class(config=self.config, dtype=self.dtype, gradient_checkpointing=True) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') token_type_ids = jnp.zeros_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init(rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False) else: module_init_outputs = self.module.init(rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False) random_params = module_init_outputs['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): input_ids = jnp.ones((batch_size, max_length), dtype='i4') attention_mask = jnp.ones_like(input_ids, dtype='i4') position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True) return unfreeze(init_variables['cache']) @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, past_key_values: dict=None): output_attentions = 
output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if self.config.add_cross_attention: if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False outputs = self.module.apply(inputs, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), token_type_ids=jnp.array(token_type_ids, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), head_mask=jnp.array(head_mask, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, mutable=mutable) if past_key_values is not None and return_dict: (outputs, past_key_values) = outputs outputs['past_key_values'] = unfreeze(past_key_values['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past_key_values) = outputs outputs = outputs[:1] + (unfreeze(past_key_values['cache']),) + outputs[1:] else: outputs = self.module.apply(inputs, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), token_type_ids=jnp.array(token_type_ids, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), head_mask=jnp.array(head_mask, dtype='i4'), deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs) return outputs class FlaxElectraModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.embeddings = FlaxElectraEmbeddings(self.config, dtype=self.dtype) if self.config.embedding_size != self.config.hidden_size: self.embeddings_project = nn.Dense(self.config.hidden_size, dtype=self.dtype) self.encoder = FlaxElectraEncoder(self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) def __call__(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask: Optional[np.ndarray]=None, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): embeddings = self.embeddings(input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic) if hasattr(self, 'embeddings_project'): embeddings = self.embeddings_project(embeddings) return self.encoder(embeddings, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) @add_start_docstrings('The bare Electra Model 
transformer outputting raw hidden-states without any specific head on top.', ELECTRA_START_DOCSTRING) class FlaxElectraModel(FlaxElectraPreTrainedModel): module_class = FlaxElectraModule append_call_sample_docstring(FlaxElectraModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC) class FlaxElectraTiedDense(nn.Module): embedding_size: int dtype: jnp.dtype = jnp.float32 precision = None bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.bias = self.param('bias', self.bias_init, (self.embedding_size,)) def __call__(self, x, kernel): x = jnp.asarray(x, self.dtype) kernel = jnp.asarray(kernel, self.dtype) y = lax.dot_general(x, kernel, (((x.ndim - 1,), (0,)), ((), ())), precision=self.precision) bias = jnp.asarray(self.bias, self.dtype) return y + bias class FlaxElectraForMaskedLMModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.generator_predictions = FlaxElectraGeneratorPredictions(config=self.config, dtype=self.dtype) if self.config.tie_word_embeddings: self.generator_lm_head = FlaxElectraTiedDense(self.config.vocab_size, dtype=self.dtype) else: self.generator_lm_head = nn.Dense(self.config.vocab_size, dtype=self.dtype) def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.electra(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] prediction_scores = self.generator_predictions(hidden_states) if self.config.tie_word_embeddings: shared_embedding = self.electra.variables['params']['embeddings']['word_embeddings']['embedding'] prediction_scores = self.generator_lm_head(prediction_scores, shared_embedding.T) else: prediction_scores = self.generator_lm_head(prediction_scores) if not return_dict: return (prediction_scores,) + outputs[1:] return FlaxMaskedLMOutput(logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('Electra Model with a `language modeling` head on top.', ELECTRA_START_DOCSTRING) class FlaxElectraForMaskedLM(FlaxElectraPreTrainedModel): module_class = FlaxElectraForMaskedLMModule append_call_sample_docstring(FlaxElectraForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC) class FlaxElectraForPreTrainingModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.discriminator_predictions = FlaxElectraDiscriminatorPredictions(config=self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.electra(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) 
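# ELECTRA pre-training (replaced-token detection): the discriminator head below scores every position
# of the encoder output, emitting one logit per token that indicates whether the token is original or
# was replaced by the generator.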
hidden_states = outputs[0] logits = self.discriminator_predictions(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxElectraForPreTrainingOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.\n\n It is recommended to load the discriminator checkpoint into that model.\n ', ELECTRA_START_DOCSTRING) class FlaxElectraForPreTraining(FlaxElectraPreTrainedModel): module_class = FlaxElectraForPreTrainingModule FLAX_ELECTRA_FOR_PRETRAINING_DOCSTRING = '\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, FlaxElectraForPreTraining\n\n >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")\n >>> model = FlaxElectraForPreTraining.from_pretrained("google/electra-small-discriminator")\n\n >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n ```\n' overwrite_call_docstring(FlaxElectraForPreTraining, ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length') + FLAX_ELECTRA_FOR_PRETRAINING_DOCSTRING) append_replace_return_docstrings(FlaxElectraForPreTraining, output_type=FlaxElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) class FlaxElectraForTokenClassificationModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) classifier_dropout = self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.electra(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Electra model with a token classification head on top.\n\n Both the discriminator and generator may be loaded into this model.\n ', ELECTRA_START_DOCSTRING) class FlaxElectraForTokenClassification(FlaxElectraPreTrainedModel): module_class = FlaxElectraForTokenClassificationModule append_call_sample_docstring(FlaxElectraForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC) def identity(x, **kwargs): return x class FlaxElectraSequenceSummary(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.summary = identity if hasattr(self.config, 'summary_use_proj') and self.config.summary_use_proj: if hasattr(self.config, 'summary_proj_to_labels') and self.config.summary_proj_to_labels and (self.config.num_labels > 0): num_classes = self.config.num_labels else: num_classes 
= self.config.hidden_size self.summary = nn.Dense(num_classes, dtype=self.dtype) activation_string = getattr(self.config, 'summary_activation', None) self.activation = ACT2FN[activation_string] if activation_string else lambda x: x self.first_dropout = identity if hasattr(self.config, 'summary_first_dropout') and self.config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(self.config.summary_first_dropout) self.last_dropout = identity if hasattr(self.config, 'summary_last_dropout') and self.config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(self.config.summary_last_dropout) def __call__(self, hidden_states, cls_index=None, deterministic: bool=True): output = hidden_states[:, 0] output = self.first_dropout(output, deterministic=deterministic) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output, deterministic=deterministic) return output class FlaxElectraForMultipleChoiceModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.sequence_summary = FlaxElectraSequenceSummary(config=self.config, dtype=self.dtype) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None outputs = self.electra(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] pooled_output = self.sequence_summary(hidden_states, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[1:] return FlaxMultipleChoiceModelOutput(logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', ELECTRA_START_DOCSTRING) class FlaxElectraForMultipleChoice(FlaxElectraPreTrainedModel): module_class = FlaxElectraForMultipleChoiceModule overwrite_call_docstring(FlaxElectraForMultipleChoice, ELECTRA_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) append_call_sample_docstring(FlaxElectraForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC) class FlaxElectraForQuestionAnsweringModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.electra(input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] logits = self.qa_outputs(hidden_states) (start_logits, end_logits) = logits.split(self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput(start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', ELECTRA_START_DOCSTRING) class FlaxElectraForQuestionAnswering(FlaxElectraPreTrainedModel): module_class = FlaxElectraForQuestionAnsweringModule append_call_sample_docstring(FlaxElectraForQuestionAnswering, _CHECKPOINT_FOR_DOC, FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC) class FlaxElectraClassificationHead(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype) classifier_dropout = self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, hidden_states, deterministic: bool=True): x = hidden_states[:, 0, :] x = self.dropout(x, deterministic=deterministic) x = self.dense(x) x = ACT2FN['gelu'](x) x = self.dropout(x, deterministic=deterministic) x = self.out_proj(x) return x class FlaxElectraForSequenceClassificationModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.classifier = FlaxElectraClassificationHead(config=self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.electra(input_ids, attention_mask, token_type_ids, 
position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] logits = self.classifier(hidden_states, deterministic=deterministic) if not return_dict: return (logits,) + outputs[1:] return FlaxSequenceClassifierOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Electra Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ', ELECTRA_START_DOCSTRING) class FlaxElectraForSequenceClassification(FlaxElectraPreTrainedModel): module_class = FlaxElectraForSequenceClassificationModule append_call_sample_docstring(FlaxElectraForSequenceClassification, _CHECKPOINT_FOR_DOC, FlaxSequenceClassifierOutput, _CONFIG_FOR_DOC) class FlaxElectraForCausalLMModule(nn.Module): config: ElectraConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.electra = FlaxElectraModule(config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing) self.generator_predictions = FlaxElectraGeneratorPredictions(config=self.config, dtype=self.dtype) if self.config.tie_word_embeddings: self.generator_lm_head = FlaxElectraTiedDense(self.config.vocab_size, dtype=self.dtype) else: self.generator_lm_head = nn.Dense(self.config.vocab_size, dtype=self.dtype) def __call__(self, input_ids, attention_mask: Optional[jnp.ndarray]=None, token_type_ids: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, head_mask: Optional[jnp.ndarray]=None, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.electra(input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] prediction_scores = self.generator_predictions(hidden_states) if self.config.tie_word_embeddings: shared_embedding = self.electra.variables['params']['embeddings']['word_embeddings']['embedding'] prediction_scores = self.generator_lm_head(prediction_scores, shared_embedding.T) else: prediction_scores = self.generator_lm_head(prediction_scores) if not return_dict: return (prediction_scores,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions(logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) @add_start_docstrings('\n Electra Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g for\n autoregressive tasks.\n ', ELECTRA_START_DOCSTRING) class FlaxElectraForCausalLM(FlaxElectraPreTrainedModel): module_class = FlaxElectraForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array]=None): (batch_size, seq_length) = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = 
lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'attention_mask': extended_attention_mask, 'position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['position_ids'] = model_kwargs['position_ids'][:, -1:] + 1 return model_kwargs append_call_sample_docstring(FlaxElectraForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC) # File: transformers-main/src/transformers/models/electra/modeling_tf_electra.py """""" from __future__ import annotations import math import warnings from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput from ...modeling_tf_utils import TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_electra import ElectraConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google/electra-small-discriminator' _CONFIG_FOR_DOC = 'ElectraConfig' class TFElectraSelfAttention(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='query') self.key = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='key') self.value = keras.layers.Dense(units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='value') self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: batch_size = 
shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: attention_scores = tf.add(attention_scores, attention_mask) attention_probs = stable_softmax(logits=attention_scores, axis=-1) attention_probs = self.dropout(inputs=attention_probs, training=training) if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'query', None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, 'key', None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, 'value', None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) class TFElectraSelfOutput(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 
'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFElectraAttention(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFElectraSelfAttention(config, name='self') self.dense_output = TFElectraSelfOutput(config, name='output') def prune_heads(self, heads): raise NotImplementedError def call(self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: Tuple[tf.Tensor], output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: self_outputs = self.self_attention(hidden_states=input_tensor, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training) attention_output = self.dense_output(hidden_states=self_outputs[0], input_tensor=input_tensor, training=training) outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attention', None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, 'dense_output', None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) class TFElectraIntermediate(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFElectraOutput(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool=False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFElectraLayer(keras.layers.Layer): def __init__(self, config: ElectraConfig, 
**kwargs): super().__init__(**kwargs) self.attention = TFElectraAttention(config, name='attention') self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = TFElectraAttention(config, name='crossattention') self.intermediate = TFElectraIntermediate(config, name='intermediate') self.bert_output = TFElectraOutput(config, name='output') def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_value: Tuple[tf.Tensor] | None, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = self.intermediate(hidden_states=attention_output) layer_output = self.bert_output(hidden_states=intermediate_output, input_tensor=attention_output, training=training) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'attention', None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, 'intermediate', None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, 'bert_output', None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) if getattr(self, 'crossattention', None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None) class TFElectraEncoder(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFElectraLayer(config, name=f'layer_._{i}') for i in range(config.num_hidden_layers)] def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, 
encoder_attention_mask: tf.Tensor | None, past_key_values: Tuple[Tuple[tf.Tensor]] | None, use_cache: Optional[bool], output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None)) return TFBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layer', None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) class TFElectraPooler(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation='tanh', name='dense') self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFElectraEmbeddings(keras.layers.Layer): def __init__(self, config: ElectraConfig, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope('word_embeddings'): self.weight = self.add_weight(name='weight', shape=[self.config.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range)) with tf.name_scope('token_type_embeddings'): self.token_type_embeddings = self.add_weight(name='embeddings', shape=[self.config.type_vocab_size, self.embedding_size], 
initializer=get_initializer(self.initializer_range)) with tf.name_scope('position_embeddings'): self.position_embeddings = self.add_weight(name='embeddings', shape=[self.max_position_embeddings, self.embedding_size], initializer=get_initializer(self.initializer_range)) if self.built: return self.built = True if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.embedding_size]) def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, token_type_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None, past_key_values_length=0, training: bool=False) -> tf.Tensor: if input_ids is None and inputs_embeds is None: raise ValueError('Need to provide either `input_ids` or `input_embeds`.') if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFElectraDiscriminatorPredictions(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.hidden_size, name='dense') self.dense_prediction = keras.layers.Dense(1, name='dense_prediction') self.config = config def call(self, discriminator_hidden_states, training=False): hidden_states = self.dense(discriminator_hidden_states) hidden_states = get_tf_activation(self.config.hidden_act)(hidden_states) logits = tf.squeeze(self.dense_prediction(hidden_states), -1) return logits def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'dense_prediction', None) is not None: with tf.name_scope(self.dense_prediction.name): self.dense_prediction.build([None, None, self.config.hidden_size]) class TFElectraGeneratorPredictions(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.dense = keras.layers.Dense(config.embedding_size, name='dense') self.config = config def call(self, generator_hidden_states, training=False): hidden_states = self.dense(generator_hidden_states) hidden_states = get_tf_activation('gelu')(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.embedding_size]) if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFElectraPreTrainedModel(TFPreTrainedModel): config_class = ElectraConfig 
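# TFElectraPreTrainedModel only carries weight-loading metadata (config class, base-model prefix and
# checkpoint key patterns to ignore); the shared encoder logic lives in TFElectraMainLayer below.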
base_model_prefix = 'electra' _keys_to_ignore_on_load_unexpected = ['generator_lm_head.weight'] _keys_to_ignore_on_load_missing = ['dropout'] @keras_serializable class TFElectraMainLayer(keras.layers.Layer): config_class = ElectraConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.is_decoder = config.is_decoder self.embeddings = TFElectraEmbeddings(config, name='embeddings') if config.embedding_size != config.hidden_size: self.embeddings_project = keras.layers.Dense(config.hidden_size, name='embeddings_project') self.encoder = TFElectraEncoder(config, name='encoder') def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError def get_extended_attention_mask(self, attention_mask, input_shape, dtype, past_key_values_length=0): (batch_size, seq_length) = input_shape if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) attention_mask_shape = shape_list(attention_mask) mask_seq_length = seq_length + past_key_values_length if self.is_decoder: seq_ids = tf.range(mask_seq_length) causal_mask = tf.less_equal(tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), seq_ids[None, :, None]) causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) extended_attention_mask = causal_mask * attention_mask[:, None, :] attention_mask_shape = shape_list(extended_attention_mask) extended_attention_mask = tf.reshape(extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])) if past_key_values_length > 0: extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] else: extended_attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])) extended_attention_mask = tf.cast(extended_attention_mask, dtype=dtype) one_cst = tf.constant(1.0, dtype=dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) return extended_attention_mask def get_head_mask(self, head_mask): if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers return head_mask @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: if not self.config.is_decoder: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to 
specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape if past_key_values is None: past_key_values_length = 0 past_key_values = [None] * len(self.encoder.layer) else: past_key_values_length = shape_list(past_key_values[0][0])[-2] if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, training=training) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, hidden_states.dtype, past_key_values_length) if self.is_decoder and encoder_attention_mask is not None: encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask) if hasattr(self, 'embeddings_project'): hidden_states = self.embeddings_project(hidden_states, training=training) hidden_states = self.encoder(hidden_states=hidden_states, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'embeddings_project', None) is not None: with tf.name_scope(self.embeddings_project.name): self.embeddings_project.build([None, None, self.config.embedding_size]) @dataclass class TFElectraForPreTrainingOutput(ModelOutput): logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None ELECTRA_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. 
Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Parameters:\n config ([`ElectraConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' ELECTRA_INPUTS_DOCSTRING = "\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. 
This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @add_start_docstrings('The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the hidden size and embedding size are different. Both the generator and discriminator checkpoints may be loaded into this model.', ELECTRA_START_DOCSTRING) class TFElectraModel(TFElectraPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.electra = TFElectraMainLayer(config, name='electra') @unpack_inputs @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: outputs = self.electra(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'electra', None) is not None: with tf.name_scope(self.electra.name): self.electra.build(None) @add_start_docstrings('\n Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.\n\n Even though both the discriminator and generator may be loaded into this model, the discriminator is the only model\n of the two to have the correct classification head to be used for this model.\n ', ELECTRA_START_DOCSTRING) class TFElectraForPreTraining(TFElectraPreTrainedModel): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.electra = TFElectraMainLayer(config, name='electra') self.discriminator_predictions = TFElectraDiscriminatorPredictions(config, name='discriminator_predictions') @unpack_inputs 
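# Illustrative, commented-out usage sketch for TFElectraForPreTraining (a hedged example; the
# "google/electra-small-discriminator" checkpoint and AutoTokenizer are assumptions, not taken from this file):
#   from transformers import AutoTokenizer, TFElectraForPreTraining
#   import tensorflow as tf
#   tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
#   model = TFElectraForPreTraining.from_pretrained("google/electra-small-discriminator")
#   inputs = tokenizer("The quick brown fox jumps over the lazy dog", return_tensors="tf")
#   logits = model(**inputs).logits
#   # positive logits mark tokens the discriminator considers replaced by the generator
#   predicted_replaced = tf.cast(logits > 0, tf.int32)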
@add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TFElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFElectraForPreTrainingOutput, Tuple[tf.Tensor]]: discriminator_hidden_states = self.electra(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) discriminator_sequence_output = discriminator_hidden_states[0] logits = self.discriminator_predictions(discriminator_sequence_output) if not return_dict: return (logits,) + discriminator_hidden_states[1:] return TFElectraForPreTrainingOutput(logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'electra', None) is not None: with tf.name_scope(self.electra.name): self.electra.build(None) if getattr(self, 'discriminator_predictions', None) is not None: with tf.name_scope(self.discriminator_predictions.name): self.discriminator_predictions.build(None) class TFElectraMaskedLMHead(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer='zeros', trainable=True, name='bias') super().build(input_shape) def get_output_embeddings(self): return self.input_embeddings def set_output_embeddings(self, value): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self): return {'bias': self.bias} def set_bias(self, value): self.bias = value['bias'] self.config.vocab_size = shape_list(value['bias'])[0] def call(self, hidden_states): seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states @add_start_docstrings('\n Electra model with a language modeling head on top.\n\n Even though both the discriminator and generator may be loaded into this model, the generator is the only model of\n the two to have been trained for the masked language modeling task.\n ', ELECTRA_START_DOCSTRING) class TFElectraForMaskedLM(TFElectraPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.config = config self.electra = TFElectraMainLayer(config, name='electra') self.generator_predictions = 
TFElectraGeneratorPredictions(config, name='generator_predictions') if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.generator_lm_head = TFElectraMaskedLMHead(config, self.electra.embeddings, name='generator_lm_head') def get_lm_head(self): return self.generator_lm_head def get_prefix_bias_name(self): warnings.warn('The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.', FutureWarning) return self.name + '/' + self.generator_lm_head.name @unpack_inputs @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='google/electra-small-generator', output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='[MASK]', expected_output="'paris'", expected_loss=1.22) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: generator_hidden_states = self.electra(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) generator_sequence_output = generator_hidden_states[0] prediction_scores = self.generator_predictions(generator_sequence_output, training=training) prediction_scores = self.generator_lm_head(prediction_scores, training=training) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + generator_hidden_states[1:] return (loss,) + output if loss is not None else output return TFMaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'electra', None) is not None: with tf.name_scope(self.electra.name): self.electra.build(None) if getattr(self, 'generator_predictions', None) is not None: with tf.name_scope(self.generator_predictions.name): self.generator_predictions.build(None) if getattr(self, 'generator_lm_head', None) is not None: with tf.name_scope(self.generator_lm_head.name): self.generator_lm_head.build(None) class TFElectraClassificationHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = keras.layers.Dropout(classifier_dropout) self.out_proj = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='out_proj') self.config = config def call(self, inputs, **kwargs): x = inputs[:, 0, :] x = self.dropout(x) x = self.dense(x) x = get_tf_activation('gelu')(x) x 
= self.dropout(x) x = self.out_proj(x) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'out_proj', None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ', ELECTRA_START_DOCSTRING) class TFElectraForSequenceClassification(TFElectraPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.electra = TFElectraMainLayer(config, name='electra') self.classifier = TFElectraClassificationHead(config, name='classifier') @unpack_inputs @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='bhadresh-savani/electra-base-emotion', output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'joy'", expected_loss=0.06) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: outputs = self.electra(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) logits = self.classifier(outputs[0]) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'electra', None) is not None: with tf.name_scope(self.electra.name): self.electra.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings('\n ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', ELECTRA_START_DOCSTRING) class TFElectraForMultipleChoice(TFElectraPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.electra = TFElectraMainLayer(config, name='electra') self.sequence_summary = TFSequenceSummary(config, initializer_range=config.initializer_range, name='sequence_summary') self.classifier = keras.layers.Dense(1, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_inputs_embeds = tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None outputs = self.electra(input_ids=flat_input_ids, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) logits = self.sequence_summary(outputs[0]) logits = self.classifier(logits) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TFMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'electra', None) is not None: with tf.name_scope(self.electra.name): self.electra.build(None) if getattr(self, 'sequence_summary', None) is not None: with tf.name_scope(self.sequence_summary.name): self.sequence_summary.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n Electra model with a token classification head on top.\n\n Both the discriminator and generator may be loaded into this model.\n ', 
ELECTRA_START_DOCSTRING) class TFElectraForTokenClassification(TFElectraPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) self.electra = TFElectraMainLayer(config, name='electra') classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = keras.layers.Dropout(classifier_dropout) self.classifier = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='bhadresh-savani/electra-base-discriminator-finetuned-conll03-english', output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="['B-LOC', 'B-ORG', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'O', 'B-LOC', 'I-LOC']", expected_loss=0.11) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: discriminator_hidden_states = self.electra(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) discriminator_sequence_output = discriminator_hidden_states[0] discriminator_sequence_output = self.dropout(discriminator_sequence_output) logits = self.classifier(discriminator_sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + discriminator_hidden_states[1:] return (loss,) + output if loss is not None else output return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'electra', None) is not None: with tf.name_scope(self.electra.name): self.electra.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n Electra Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', ELECTRA_START_DOCSTRING) class TFElectraForQuestionAnswering(TFElectraPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.electra = TFElectraMainLayer(config, name='electra') self.qa_outputs = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs') self.config = config @unpack_inputs 
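# Illustrative, commented-out usage sketch for TFElectraForQuestionAnswering (a hedged example; it reuses the
# "bhadresh-savani/electra-base-squad2" checkpoint named in the docstring decorator below):
#   from transformers import AutoTokenizer, TFElectraForQuestionAnswering
#   import tensorflow as tf
#   tokenizer = AutoTokenizer.from_pretrained("bhadresh-savani/electra-base-squad2")
#   model = TFElectraForQuestionAnswering.from_pretrained("bhadresh-savani/electra-base-squad2")
#   inputs = tokenizer("Who wrote Hamlet?", "Hamlet was written by William Shakespeare.", return_tensors="tf")
#   outputs = model(**inputs)
#   start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
#   end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
#   answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])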
@add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='bhadresh-savani/electra-base-squad2', output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=11, qa_target_end_index=12, expected_output="'a nice puppet'", expected_loss=2.64) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, start_positions: np.ndarray | tf.Tensor | None=None, end_positions: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: discriminator_hidden_states = self.electra(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) discriminator_sequence_output = discriminator_hidden_states[0] logits = self.qa_outputs(discriminator_sequence_output) (start_logits, end_logits) = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {'start_position': start_positions} labels['end_position'] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + discriminator_hidden_states[1:] return (loss,) + output if loss is not None else output return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'electra', None) is not None: with tf.name_scope(self.electra.name): self.electra.build(None) if getattr(self, 'qa_outputs', None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) # File: transformers-main/src/transformers/models/electra/tokenization_electra.py import collections import os import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} def load_vocab(vocab_file): vocab = collections.OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as reader: tokens = reader.readlines() for (index, token) in enumerate(tokens): token = token.rstrip('\n') vocab[token] = index return vocab def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens class ElectraTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, 
strip_accents=None, **kwargs): if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = ElectraTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text, split_special_tokens=False): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: for (token, token_index) in sorted(self.vocab.items(), key=lambda kv: kv[1]): if 
index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,) class BasicTokenizer: def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) unicode_normalized_text = unicodedata.normalize('NFC', text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return ''.join(output) def _is_chinese_char(self, cp): if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103): return True return False def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 65533 or _is_control(char): continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output) class WordpieceTokenizer: def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = ''.join(chars[start:end]) if start > 0: substr = '##' + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break 
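# Greedy longest-match-first: the inner loop above shrinks `end` until the longest piece starting at
# `start` (prefixed with "##" for non-initial pieces) is found in the vocabulary; under a typical
# WordPiece vocabulary this turns e.g. "unaffable" into ["un", "##aff", "##able"] (illustrative).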
sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens # File: transformers-main/src/transformers/models/electra/tokenization_electra_fast.py import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} class ElectraTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = ElectraTokenizer def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs): super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars: normalizer_class = getattr(normalizers, normalizer_state.pop('type')) normalizer_state['lowercase'] = do_lower_case normalizer_state['strip_accents'] = strip_accents normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) # File: transformers-main/src/transformers/models/encodec/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_encodec': ['EncodecConfig'], 'feature_extraction_encodec': ['EncodecFeatureExtractor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_encodec'] = ['EncodecModel', 'EncodecPreTrainedModel'] if TYPE_CHECKING: from .configuration_encodec import EncodecConfig from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import EncodecModel, EncodecPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: 
transformers-main/src/transformers/models/encodec/configuration_encodec.py """""" import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class EncodecConfig(PretrainedConfig): model_type = 'encodec' def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type='weight_norm', kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode='reflect', compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs): self.target_bandwidths = target_bandwidths self.sampling_rate = sampling_rate self.audio_channels = audio_channels self.normalize = normalize self.chunk_length_s = chunk_length_s self.overlap = overlap self.hidden_size = hidden_size self.num_filters = num_filters self.num_residual_layers = num_residual_layers self.upsampling_ratios = upsampling_ratios self.norm_type = norm_type self.kernel_size = kernel_size self.last_kernel_size = last_kernel_size self.residual_kernel_size = residual_kernel_size self.dilation_growth_rate = dilation_growth_rate self.use_causal_conv = use_causal_conv self.pad_mode = pad_mode self.compress = compress self.num_lstm_layers = num_lstm_layers self.trim_right_ratio = trim_right_ratio self.codebook_size = codebook_size self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size self.use_conv_shortcut = use_conv_shortcut if self.norm_type not in ['weight_norm', 'time_group_norm']: raise ValueError(f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}') super().__init__(**kwargs) @property def chunk_length(self) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate) @property def chunk_stride(self) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length)) @property def frame_rate(self) -> int: hop_length = np.prod(self.upsampling_ratios) return math.ceil(self.sampling_rate / hop_length) @property def num_quantizers(self) -> int: return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10)) # File: transformers-main/src/transformers/models/encodec/convert_encodec_checkpoint_to_pytorch.py """""" import argparse import torch from transformers import EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging logging.set_verbosity_info() logger = logging.get_logger('transformers.models.encodec') MAPPING_QUANTIZER = {'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited', 'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size', 'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed', 'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg'} MAPPING_ENCODER = {'encoder.model.0.conv.conv': 'encoder.layers.0.conv', 'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv', 'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv', 'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv', 'encoder.model.3.conv.conv': 'encoder.layers.3.conv', 'encoder.model.4.block.1.conv.conv': 
'encoder.layers.4.block.1.conv', 'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv', 'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv', 'encoder.model.6.conv.conv': 'encoder.layers.6.conv', 'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv', 'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv', 'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv', 'encoder.model.9.conv.conv': 'encoder.layers.9.conv', 'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv', 'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv', 'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv', 'encoder.model.12.conv.conv': 'encoder.layers.12.conv', 'encoder.model.13.lstm': 'encoder.layers.13.lstm', 'encoder.model.15.conv.conv': 'encoder.layers.15.conv'} MAPPING_ENCODER_48K = {'encoder.model.0.conv.norm': 'encoder.layers.0.norm', 'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm', 'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm', 'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm', 'encoder.model.3.conv.norm': 'encoder.layers.3.norm', 'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm', 'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm', 'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm', 'encoder.model.6.conv.norm': 'encoder.layers.6.norm', 'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm', 'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm', 'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm', 'encoder.model.9.conv.norm': 'encoder.layers.9.norm', 'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm', 'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm', 'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm', 'encoder.model.12.conv.norm': 'encoder.layers.12.norm', 'encoder.model.15.conv.norm': 'encoder.layers.15.norm'} MAPPING_DECODER = {'decoder.model.0.conv.conv': 'decoder.layers.0.conv', 'decoder.model.1.lstm': 'decoder.layers.1.lstm', 'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv', 'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv', 'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv', 'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv', 'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv', 'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv', 'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv', 'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv', 'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv', 'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv', 'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv', 'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv', 'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv', 'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv', 'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv', 'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv', 'decoder.model.15.conv.conv': 'decoder.layers.15.conv'} MAPPING_DECODER_48K = {'decoder.model.0.conv.norm': 'decoder.layers.0.norm', 'decoder.model.3.convtr.norm': 'decoder.layers.3.norm', 'decoder.model.4.block.1.conv.norm': 
'decoder.layers.4.block.1.norm', 'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm', 'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm', 'decoder.model.6.convtr.norm': 'decoder.layers.6.norm', 'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm', 'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm', 'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm', 'decoder.model.9.convtr.norm': 'decoder.layers.9.norm', 'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm', 'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm', 'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm', 'decoder.model.12.convtr.norm': 'decoder.layers.12.norm', 'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm', 'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm', 'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm', 'decoder.model.15.conv.norm': 'decoder.layers.15.norm'} MAPPING_24K = {**MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER} MAPPING_48K = {**MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K} TOP_LEVEL_KEYS = [] IGNORE_KEYS = [] def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split('.'): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape if hf_shape != value.shape: raise ValueError(f"Shape of hf {(key + '.' + weight_type if weight_type is not None else '')} is {hf_shape}, but should be {value.shape} for {full_name}") if weight_type == 'weight': hf_pointer.weight.data = value elif weight_type == 'weight_g': hf_pointer.weight_g.data = value elif weight_type == 'weight_v': hf_pointer.weight_v.data = value elif weight_type == 'bias': hf_pointer.bias.data = value elif weight_type == 'running_mean': hf_pointer.running_mean.data = value elif weight_type == 'running_var': hf_pointer.running_var.data = value elif weight_type == 'num_batches_tracked': hf_pointer.num_batches_tracked.data = value elif weight_type == 'weight_ih_l0': hf_pointer.weight_ih_l0.data = value elif weight_type == 'weight_hh_l0': hf_pointer.weight_hh_l0.data = value elif weight_type == 'bias_ih_l0': hf_pointer.bias_ih_l0.data = value elif weight_type == 'bias_hh_l0': hf_pointer.bias_hh_l0.data = value elif weight_type == 'weight_ih_l1': hf_pointer.weight_ih_l1.data = value elif weight_type == 'weight_hh_l1': hf_pointer.weight_hh_l1.data = value elif weight_type == 'bias_ih_l1': hf_pointer.bias_ih_l1.data = value elif weight_type == 'bias_hh_l1': hf_pointer.bias_hh_l1.data = value else: hf_pointer.data = value logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.") def should_ignore(name, ignore_keys): for key in ignore_keys: if key.endswith('.*'): if name.startswith(key[:-1]): return True elif '.*.' 
in key: (prefix, suffix) = key.split('.*.') if prefix in name and suffix in name: return True elif key in name: return True return False def recursively_load_weights(orig_dict, hf_model, model_name): unused_weights = [] if model_name in ['encodec_24khz', 'encodec_32khz']: MAPPING = MAPPING_24K elif model_name == 'encodec_48khz': MAPPING = MAPPING_48K else: raise ValueError(f'Unsupported model: {model_name}') for (name, value) in orig_dict.items(): if should_ignore(name, IGNORE_KEYS): logger.info(f'{name} was ignored') continue is_used = False for (key, mapped_key) in MAPPING.items(): if '*' in key: (prefix, suffix) = key.split('.*.') if prefix in name and suffix in name: key = suffix if key in name: if key.endswith('embed') and name.endswith('embed_avg'): continue is_used = True if '*' in mapped_key: layer_index = name.split(key)[0].split('.')[-2] mapped_key = mapped_key.replace('*', layer_index) if 'weight_g' in name: weight_type = 'weight_g' elif 'weight_v' in name: weight_type = 'weight_v' elif 'weight_ih_l0' in name: weight_type = 'weight_ih_l0' elif 'weight_hh_l0' in name: weight_type = 'weight_hh_l0' elif 'bias_ih_l0' in name: weight_type = 'bias_ih_l0' elif 'bias_hh_l0' in name: weight_type = 'bias_hh_l0' elif 'weight_ih_l1' in name: weight_type = 'weight_ih_l1' elif 'weight_hh_l1' in name: weight_type = 'weight_hh_l1' elif 'bias_ih_l1' in name: weight_type = 'bias_ih_l1' elif 'bias_hh_l1' in name: weight_type = 'bias_hh_l1' elif 'bias' in name: weight_type = 'bias' elif 'weight' in name: weight_type = 'weight' elif 'running_mean' in name: weight_type = 'running_mean' elif 'running_var' in name: weight_type = 'running_var' elif 'num_batches_tracked' in name: weight_type = 'num_batches_tracked' else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f'Unused weights: {unused_weights}') @torch.no_grad() def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None): if config_path is not None: config = EncodecConfig.from_pretrained(config_path) else: config = EncodecConfig() if model_name == 'encodec_24khz': pass elif model_name == 'encodec_32khz': config.upsampling_ratios = [8, 5, 4, 4] config.target_bandwidths = [2.2] config.num_filters = 64 config.sampling_rate = 32000 config.codebook_size = 2048 config.use_causal_conv = False config.normalize = False config.use_conv_shortcut = False elif model_name == 'encodec_48khz': config.upsampling_ratios = [8, 5, 4, 2] config.target_bandwidths = [3.0, 6.0, 12.0, 24.0] config.sampling_rate = 48000 config.audio_channels = 2 config.use_causal_conv = False config.norm_type = 'time_group_norm' config.normalize = True config.chunk_length_s = 1.0 config.overlap = 0.01 else: raise ValueError(f'Unknown model name: {model_name}') model = EncodecModel(config) feature_extractor = EncodecFeatureExtractor(feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap) feature_extractor.save_pretrained(pytorch_dump_folder_path) original_checkpoint = torch.load(checkpoint_path) if 'best_state' in original_checkpoint: original_checkpoint = original_checkpoint['best_state'] recursively_load_weights(original_checkpoint, model, model_name) model.save_pretrained(pytorch_dump_folder_path) if repo_id: print('Pushing to the hub...') feature_extractor.push_to_hub(repo_id) model.push_to_hub(repo_id) if __name__ == '__main__': parser = 
argparse.ArgumentParser() parser.add_argument('--model', default='encodec_24khz', type=str, help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.") parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.') args = parser.parse_args() convert_checkpoint(args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub) # File: transformers-main/src/transformers/models/encodec/feature_extraction_encodec.py """""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging logger = logging.get_logger(__name__) class EncodecFeatureExtractor(SequenceFeatureExtractor): model_input_names = ['input_values', 'padding_mask'] def __init__(self, feature_size: int=1, sampling_rate: int=24000, padding_value: float=0.0, chunk_length_s: float=None, overlap: float=None, **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) self.chunk_length_s = chunk_length_s self.overlap = overlap @property def chunk_length(self) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate) @property def chunk_stride(self) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length)) def __call__(self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]]=None, truncation: Optional[bool]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided audio input was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning('It is strongly recommended to pass the `sampling_rate` argument to this function. Failing to do so can result in silent errors that might be hard to debug.') if padding and truncation: raise ValueError('Both padding and truncation were set. 
Make sure you only set one.') elif padding is None: padding = True is_batched = bool(isinstance(raw_audio, (list, tuple)) and isinstance(raw_audio[0], (np.ndarray, tuple, list))) if is_batched: raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio] elif not is_batched and (not isinstance(raw_audio, np.ndarray)): raw_audio = np.asarray(raw_audio, dtype=np.float32) elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64): raw_audio = raw_audio.astype(np.float32) if not is_batched: raw_audio = [np.asarray(raw_audio).T] for (idx, example) in enumerate(raw_audio): if example.ndim > 2: raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}') if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels') if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels') padded_inputs = None input_values = BatchFeature({'input_values': raw_audio}) if self.chunk_stride is not None and self.chunk_length is not None and (max_length is None): if truncation: max_length = min((array.shape[0] for array in raw_audio)) nb_step = int(np.floor(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: max_length = max((array.shape[0] for array in raw_audio)) nb_step = int(np.ceil(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length padding = 'max_length' else: padded_inputs = input_values if padded_inputs is None: padded_inputs = self.pad(input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding) if padding: padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask') input_values = [] for example in padded_inputs.pop('input_values'): if self.feature_size == 1: example = example[..., None] input_values.append(example.T) padded_inputs['input_values'] = input_values if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs # File: transformers-main/src/transformers/models/encodec/modeling_encodec.py """""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_encodec import EncodecConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'EncodecConfig' @dataclass class EncodecOutput(ModelOutput): audio_codes: torch.LongTensor = None audio_values: torch.FloatTensor = None @dataclass class EncodecEncoderOutput(ModelOutput): audio_codes: torch.LongTensor = None audio_scales: torch.FloatTensor = None @dataclass class EncodecDecoderOutput(ModelOutput): audio_values: torch.FloatTensor = None class EncodecConv1d(nn.Module): def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, dilation: int=1): super().__init__() self.causal = config.use_causal_conv self.pad_mode = config.pad_mode self.norm_type = config.norm_type if self.norm_type not in ['weight_norm', 'time_group_norm']: raise ValueError(f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}') if stride > 1 and dilation > 1: 
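# A minimal usage sketch for the __call__ method above, assuming a mono 24 kHz extractor with
# default arguments. The random waveform is a stand-in for real audio.
import numpy as np
from transformers import EncodecFeatureExtractor

feature_extractor = EncodecFeatureExtractor()            # defaults: mono, 24 kHz, no chunking
waveform = np.random.randn(24_000).astype(np.float32)    # one second of placeholder audio
inputs = feature_extractor(waveform, sampling_rate=24_000, return_tensors="pt")
# inputs["input_values"] -> shape (batch=1, channels=1, samples)
# inputs["padding_mask"] -> shape (batch=1, samples)
print(inputs["input_values"].shape, inputs["padding_mask"].shape)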
logger.warning(f'EncodecConv1d has been initialized with stride > 1 and dilation > 1 (kernel_size={kernel_size} stride={stride}, dilation={dilation}).') self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, dilation=dilation) if self.norm_type == 'weight_norm': self.conv = nn.utils.weight_norm(self.conv) elif self.norm_type == 'time_group_norm': self.norm = nn.GroupNorm(1, out_channels) kernel_size = self.conv.kernel_size[0] stride = torch.tensor(self.conv.stride[0], dtype=torch.int64) dilation = self.conv.dilation[0] kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64) self.register_buffer('stride', stride, persistent=False) self.register_buffer('kernel_size', kernel_size, persistent=False) self.register_buffer('padding_total', torch.tensor(kernel_size - stride, dtype=torch.int64), persistent=False) def _get_extra_padding_for_conv1d(self, hidden_states: torch.Tensor) -> torch.Tensor: length = hidden_states.shape[-1] n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1 n_frames = torch.ceil(n_frames).to(torch.int64) - 1 ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total return ideal_length - length @staticmethod def _pad1d(hidden_states: torch.Tensor, paddings: Tuple[int, int], mode: str='zero', value: float=0.0): length = hidden_states.shape[-1] (padding_left, padding_right) = paddings if not mode == 'reflect': return nn.functional.pad(hidden_states, paddings, mode, value) max_pad = max(padding_left, padding_right) extra_pad = 0 if length <= max_pad: extra_pad = max_pad - length + 1 hidden_states = nn.functional.pad(hidden_states, (0, extra_pad)) padded = nn.functional.pad(hidden_states, paddings, mode, value) end = padded.shape[-1] - extra_pad return padded[..., :end] def forward(self, hidden_states): extra_padding = self._get_extra_padding_for_conv1d(hidden_states) if self.causal: hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode) else: padding_right = self.padding_total // 2 padding_left = self.padding_total - padding_right hidden_states = self._pad1d(hidden_states, (padding_left, padding_right + extra_padding), mode=self.pad_mode) hidden_states = self.conv(hidden_states) if self.norm_type == 'time_group_norm': hidden_states = self.norm(hidden_states) return hidden_states class EncodecConvTranspose1d(nn.Module): def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int=1): super().__init__() self.causal = config.use_causal_conv self.trim_right_ratio = config.trim_right_ratio self.norm_type = config.norm_type if self.norm_type not in ['weight_norm', 'time_group_norm']: raise ValueError(f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}') self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride) if config.norm_type == 'weight_norm': self.conv = nn.utils.weight_norm(self.conv) elif config.norm_type == 'time_group_norm': self.norm = nn.GroupNorm(1, out_channels) if not (self.causal or self.trim_right_ratio == 1.0): raise ValueError('`trim_right_ratio` != 1.0 only makes sense for causal convolutions') def forward(self, hidden_states): kernel_size = self.conv.kernel_size[0] stride = self.conv.stride[0] padding_total = kernel_size - stride hidden_states = self.conv(hidden_states) if self.norm_type == 'time_group_norm': hidden_states = self.norm(hidden_states) if self.causal: padding_right = math.ceil(padding_total * self.trim_right_ratio) else: 
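# A small arithmetic sketch of the "extra padding" computation above, written with plain Python
# numbers so it can be checked by hand. The kernel size, stride and length are arbitrary examples,
# not a specific EnCodec layer.
import math

def extra_padding_for_conv1d(length: int, kernel_size: int, stride: int, dilation: int = 1) -> int:
    # Mirrors EncodecConv1d._get_extra_padding_for_conv1d for illustration.
    effective_kernel = (kernel_size - 1) * dilation + 1
    padding_total = effective_kernel - stride
    n_frames = (length - effective_kernel + padding_total) / stride + 1
    n_frames = math.ceil(n_frames) - 1
    ideal_length = n_frames * stride + effective_kernel - padding_total
    return ideal_length - length

# With kernel_size=7, stride=2 and an 11-sample input, one extra padded sample is needed so the
# last frame is fully covered: n_frames = ceil(5.5) - 1 = 5, ideal_length = 12, extra = 1.
print(extra_padding_for_conv1d(length=11, kernel_size=7, stride=2))   # -> 1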
padding_right = padding_total // 2 padding_left = padding_total - padding_right end = hidden_states.shape[-1] - padding_right hidden_states = hidden_states[..., padding_left:end] return hidden_states class EncodecLSTM(nn.Module): def __init__(self, config, dimension): super().__init__() self.lstm = nn.LSTM(dimension, dimension, config.num_lstm_layers) def forward(self, hidden_states): hidden_states = hidden_states.permute(2, 0, 1) hidden_states = self.lstm(hidden_states)[0] + hidden_states hidden_states = hidden_states.permute(1, 2, 0) return hidden_states class EncodecResnetBlock(nn.Module): def __init__(self, config: EncodecConfig, dim: int, dilations: List[int]): super().__init__() kernel_sizes = (config.residual_kernel_size, 1) if len(kernel_sizes) != len(dilations): raise ValueError('Number of kernel sizes should match number of dilations') hidden = dim // config.compress block = [] for (i, (kernel_size, dilation)) in enumerate(zip(kernel_sizes, dilations)): in_chs = dim if i == 0 else hidden out_chs = dim if i == len(kernel_sizes) - 1 else hidden block += [nn.ELU()] block += [EncodecConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)] self.block = nn.ModuleList(block) if config.use_conv_shortcut: self.shortcut = EncodecConv1d(config, dim, dim, kernel_size=1) else: self.shortcut = nn.Identity() def forward(self, hidden_states): residual = hidden_states for layer in self.block: hidden_states = layer(hidden_states) return self.shortcut(residual) + hidden_states class EncodecEncoder(nn.Module): def __init__(self, config: EncodecConfig): super().__init__() model = [EncodecConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)] scaling = 1 for ratio in reversed(config.upsampling_ratios): current_scale = scaling * config.num_filters for j in range(config.num_residual_layers): model += [EncodecResnetBlock(config, current_scale, [config.dilation_growth_rate ** j, 1])] model += [nn.ELU()] model += [EncodecConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)] scaling *= 2 model += [EncodecLSTM(config, scaling * config.num_filters)] model += [nn.ELU()] model += [EncodecConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)] self.layers = nn.ModuleList(model) def forward(self, hidden_states): for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states class EncodecDecoder(nn.Module): def __init__(self, config: EncodecConfig): super().__init__() scaling = int(2 ** len(config.upsampling_ratios)) model = [EncodecConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)] model += [EncodecLSTM(config, scaling * config.num_filters)] for ratio in config.upsampling_ratios: current_scale = scaling * config.num_filters model += [nn.ELU()] model += [EncodecConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio)] for j in range(config.num_residual_layers): model += [EncodecResnetBlock(config, current_scale // 2, (config.dilation_growth_rate ** j, 1))] scaling //= 2 model += [nn.ELU()] model += [EncodecConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)] self.layers = nn.ModuleList(model) def forward(self, hidden_states): for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states class EncodecEuclideanCodebook(nn.Module): def __init__(self, config: EncodecConfig): super().__init__() embed = torch.zeros(config.codebook_size, config.codebook_dim) self.codebook_size 
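# A quick sketch of how the encoder's channel width grows in EncodecEncoder above: each
# downsampling stage doubles `scaling`, so the width entering the LSTM is
# num_filters * 2 ** len(upsampling_ratios). The values below assume num_filters=32 and
# upsampling_ratios=[8, 5, 4, 2] (a 24 kHz-style configuration); adjust for other configs.
num_filters = 32
upsampling_ratios = [8, 5, 4, 2]

widths = [num_filters]
for _ in reversed(upsampling_ratios):
    widths.append(widths[-1] * 2)
# widths == [32, 64, 128, 256, 512]; the LSTM and final conv see 512 channels, and the overall
# hop size is the product of the ratios (8 * 5 * 4 * 2 = 320 samples, i.e. 75 frames/s at 24 kHz).
print(widths, 8 * 5 * 4 * 2)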
= config.codebook_size self.register_buffer('inited', torch.Tensor([True])) self.register_buffer('cluster_size', torch.zeros(config.codebook_size)) self.register_buffer('embed', embed) self.register_buffer('embed_avg', embed.clone()) def quantize(self, hidden_states): embed = self.embed.t() scaled_states = hidden_states.pow(2).sum(1, keepdim=True) dist = -(scaled_states - 2 * hidden_states @ embed + embed.pow(2).sum(0, keepdim=True)) embed_ind = dist.max(dim=-1).indices return embed_ind def encode(self, hidden_states): shape = hidden_states.shape hidden_states = hidden_states.reshape((-1, shape[-1])) embed_ind = self.quantize(hidden_states) embed_ind = embed_ind.view(*shape[:-1]) return embed_ind def decode(self, embed_ind): quantize = nn.functional.embedding(embed_ind, self.embed) return quantize class EncodecVectorQuantization(nn.Module): def __init__(self, config: EncodecConfig): super().__init__() self.codebook = EncodecEuclideanCodebook(config) def encode(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 1) embed_in = self.codebook.encode(hidden_states) return embed_in def decode(self, embed_ind): quantize = self.codebook.decode(embed_ind) quantize = quantize.permute(0, 2, 1) return quantize class EncodecResidualVectorQuantizer(nn.Module): def __init__(self, config: EncodecConfig): super().__init__() self.codebook_size = config.codebook_size self.frame_rate = config.frame_rate self.num_quantizers = config.num_quantizers self.layers = nn.ModuleList([EncodecVectorQuantization(config) for _ in range(config.num_quantizers)]) def get_num_quantizers_for_bandwidth(self, bandwidth: Optional[float]=None) -> int: bw_per_q = math.log2(self.codebook_size) * self.frame_rate num_quantizers = self.num_quantizers if bandwidth is not None and bandwidth > 0.0: num_quantizers = int(max(1, math.floor(bandwidth * 1000 / bw_per_q))) return num_quantizers def encode(self, embeddings: torch.Tensor, bandwidth: Optional[float]=None) -> torch.Tensor: num_quantizers = self.get_num_quantizers_for_bandwidth(bandwidth) residual = embeddings all_indices = [] for layer in self.layers[:num_quantizers]: indices = layer.encode(residual) quantized = layer.decode(indices) residual = residual - quantized all_indices.append(indices) out_indices = torch.stack(all_indices) return out_indices def decode(self, codes: torch.Tensor) -> torch.Tensor: quantized_out = torch.tensor(0.0, device=codes.device) for (i, indices) in enumerate(codes): layer = self.layers[i] quantized = layer.decode(indices) quantized_out = quantized_out + quantized return quantized_out class EncodecPreTrainedModel(PreTrainedModel): config_class = EncodecConfig base_model_prefix = 'encodec' main_input_name = 'input_values' def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LSTM): for (name, param) in module.named_parameters(): if 'weight' in name: 
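# A small arithmetic sketch of get_num_quantizers_for_bandwidth above, assuming codebook_size=1024
# and frame_rate=75 (the values usually quoted for the 24 kHz model). Each codebook then costs
# log2(1024) * 75 = 750 bits/s, so a 6 kbps target selects floor(6000 / 750) = 8 quantizers.
import math

codebook_size, frame_rate = 1024, 75
bits_per_quantizer = math.log2(codebook_size) * frame_rate   # 750.0 bits/s per codebook

for bandwidth in (1.5, 3.0, 6.0, 12.0, 24.0):                # target bandwidths in kbps
    n = max(1, math.floor(bandwidth * 1000 / bits_per_quantizer))
    print(f"{bandwidth:>4} kbps -> {n} quantizers")          # 2, 4, 8, 16, 32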
nn.init.xavier_uniform_(param) elif 'bias' in name: nn.init.constant_(param, 0.0) ENCODEC_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`EncodecConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' ENCODEC_INPUTS_DOCSTRING = '\n Args:\n input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):\n Raw audio input converted to Float and padded to the approriate length in order to be encoded using chunks\n of length self.chunk_length and a stride of `config.chunk_stride`.\n padding_mask (`torch.BoolTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):\n Mask to avoid computing scaling factors on padding token indices (can we avoid computing conv on these+).\n Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n \n\n `padding_mask` should always be passed, unless the input was truncated or not padded. This is because in\n order to process tensors effectively, the input audio should be padded so that `input_length % stride =\n step` with `step = chunk_length-stride`. This ensures that all chunks are of the same shape\n\n \n\n bandwidth (`float`, *optional*):\n The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible\n bandwidth. bandwidth is represented as a thousandth of what it is, e.g. 
6kbps bandwidth is represented as\n `bandwidth == 6.0`\n audio_codes (`torch.LongTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):\n Discret code embeddings computed using `model.encode`.\n audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):\n Scaling factor for each `audio_codes` input.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The EnCodec neural audio codec model.', ENCODEC_START_DOCSTRING) class EncodecModel(EncodecPreTrainedModel): def __init__(self, config: EncodecConfig): super().__init__(config) self.config = config self.encoder = EncodecEncoder(config) self.decoder = EncodecDecoder(config) self.quantizer = EncodecResidualVectorQuantizer(config) self.bits_per_codebook = int(math.log2(self.config.codebook_size)) if 2 ** self.bits_per_codebook != self.config.codebook_size: raise ValueError('The codebook_size must be a power of 2.') self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _encode_frame(self, input_values: torch.Tensor, bandwidth: float, padding_mask: int) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: length = input_values.shape[-1] duration = length / self.config.sampling_rate if self.config.chunk_length_s is not None and duration > 1e-05 + self.config.chunk_length_s: raise RuntimeError(f'Duration of frame ({duration}) is longer than chunk {self.config.chunk_length_s}') scale = None if self.config.normalize: input_values = input_values * padding_mask mono = torch.sum(input_values, 1, keepdim=True) / input_values.shape[1] scale = mono.pow(2).mean(dim=-1, keepdim=True).sqrt() + 1e-08 input_values = input_values / scale embeddings = self.encoder(input_values) codes = self.quantizer.encode(embeddings, bandwidth) codes = codes.transpose(0, 1) return (codes, scale) def encode(self, input_values: torch.Tensor, padding_mask: torch.Tensor=None, bandwidth: Optional[float]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], EncodecEncoderOutput]: return_dict = return_dict if return_dict is not None else self.config.return_dict if bandwidth is None: bandwidth = self.config.target_bandwidths[0] if bandwidth not in self.config.target_bandwidths: raise ValueError(f"This model doesn't support the bandwidth {bandwidth}. Select one of {self.config.target_bandwidths}.") (_, channels, input_length) = input_values.shape if channels < 1 or channels > 2: raise ValueError(f'Number of audio channels must be 1 or 2, but got {channels}') chunk_length = self.config.chunk_length if chunk_length is None: chunk_length = input_length stride = input_length else: stride = self.config.chunk_stride if padding_mask is None: padding_mask = torch.ones_like(input_values).bool() encoded_frames = [] scales = [] step = chunk_length - stride if input_length % stride - step != 0: raise ValueError('The input length is not properly padded for batched chunked decoding. 
Make sure to pad the input correctly.') for offset in range(0, input_length - step, stride): mask = padding_mask[..., offset:offset + chunk_length].bool() frame = input_values[:, :, offset:offset + chunk_length] (encoded_frame, scale) = self._encode_frame(frame, bandwidth, mask) encoded_frames.append(encoded_frame) scales.append(scale) encoded_frames = torch.stack(encoded_frames) if not return_dict: return (encoded_frames, scales) return EncodecEncoderOutput(encoded_frames, scales) @staticmethod def _linear_overlap_add(frames: List[torch.Tensor], stride: int): if len(frames) == 0: raise ValueError('`frames` cannot be an empty list.') device = frames[0].device dtype = frames[0].dtype shape = frames[0].shape[:-1] total_size = stride * (len(frames) - 1) + frames[-1].shape[-1] frame_length = frames[0].shape[-1] time_vec = torch.linspace(0, 1, frame_length + 2, device=device, dtype=dtype)[1:-1] weight = 0.5 - (time_vec - 0.5).abs() sum_weight = torch.zeros(total_size, device=device, dtype=dtype) out = torch.zeros(*shape, total_size, device=device, dtype=dtype) offset: int = 0 for frame in frames: frame_length = frame.shape[-1] out[..., offset:offset + frame_length] += weight[:frame_length] * frame sum_weight[offset:offset + frame_length] += weight[:frame_length] offset += stride if sum_weight.min() == 0: raise ValueError(f'`sum_weight` minimum element must be bigger than zero: {sum_weight}`') return out / sum_weight def _decode_frame(self, codes: torch.Tensor, scale: Optional[torch.Tensor]=None) -> torch.Tensor: codes = codes.transpose(0, 1) embeddings = self.quantizer.decode(codes) outputs = self.decoder(embeddings) if scale is not None: outputs = outputs * scale.view(-1, 1, 1) return outputs def decode(self, audio_codes: torch.Tensor, audio_scales: torch.Tensor, padding_mask: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecDecoderOutput]: return_dict = return_dict if return_dict is not None else self.config.return_dict chunk_length = self.config.chunk_length if chunk_length is None: if len(audio_codes) != 1: raise ValueError(f'Expected one frame, got {len(audio_codes)}') audio_values = self._decode_frame(audio_codes[0], audio_scales[0]) else: decoded_frames = [] for (frame, scale) in zip(audio_codes, audio_scales): frames = self._decode_frame(frame, scale) decoded_frames.append(frames) audio_values = self._linear_overlap_add(decoded_frames, self.config.chunk_stride or 1) if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]: audio_values = audio_values[..., :padding_mask.shape[-1]] if not return_dict: return (audio_values,) return EncodecDecoderOutput(audio_values) @add_start_docstrings_to_model_forward(ENCODEC_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=EncodecOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_values: torch.Tensor, padding_mask: Optional[torch.Tensor]=None, bandwidth: Optional[float]=None, audio_codes: Optional[torch.Tensor]=None, audio_scales: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor, torch.Tensor], EncodecOutput]: return_dict = return_dict if return_dict is not None else self.config.return_dict if padding_mask is None: padding_mask = torch.ones_like(input_values).bool() if audio_codes is not None and audio_scales is None: raise ValueError('You specified `audio_codes` but did not specify the `audio_scales`') if audio_scales is not None and audio_codes is None: raise ValueError('You specified `audio_scales` but 
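# A tiny sketch of the triangular weighting used by _linear_overlap_add above. Two constant-valued
# frames that overlap are cross-faded back into a constant signal, which is the property the
# weighting is meant to guarantee. Frame length and stride are arbitrary small values.
import torch

frame_length, stride = 8, 4
time_vec = torch.linspace(0, 1, frame_length + 2)[1:-1]
weight = 0.5 - (time_vec - 0.5).abs()            # triangular fade-in / fade-out

frames = [torch.ones(1, 1, frame_length), torch.ones(1, 1, frame_length)]
total = stride * (len(frames) - 1) + frame_length
out = torch.zeros(1, 1, total)
sum_weight = torch.zeros(total)
for i, frame in enumerate(frames):
    out[..., i * stride:i * stride + frame_length] += weight * frame
    sum_weight[i * stride:i * stride + frame_length] += weight
print(out / sum_weight)                          # all ones: the overlap-add is transparent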
did not specify the `audio_codes`') if audio_scales is None and audio_codes is None: (audio_codes, audio_scales) = self.encode(input_values, padding_mask, bandwidth, False) audio_values = self.decode(audio_codes, audio_scales, padding_mask, return_dict=return_dict)[0] if not return_dict: return (audio_codes, audio_values) return EncodecOutput(audio_codes=audio_codes, audio_values=audio_values) # File: transformers-main/src/transformers/models/encoder_decoder/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available _import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class EncoderDecoderConfig(PretrainedConfig): model_type = 'encoder-decoder' is_composition = True def __init__(self, **kwargs): super().__init__(**kwargs) if 'encoder' not in kwargs or 'decoder' not in kwargs: raise ValueError(f'A configuraton of type {self.model_type} cannot be instantiated because both `encoder` and `decoder` sub-configurations were not passed, only {kwargs}') encoder_config = kwargs.pop('encoder') encoder_model_type = encoder_config.pop('model_type') decoder_config = kwargs.pop('decoder') decoder_model_type = decoder_config.pop('model_type') from ..auto.configuration_auto import AutoConfig self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config) self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config) self.is_encoder_decoder = True @classmethod def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig: logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config') decoder_config.is_decoder = True decoder_config.add_cross_attention = True return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs) # File: 
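# A minimal sketch of composing an EncoderDecoderConfig from two sub-configurations via
# from_encoder_decoder_configs above. BertConfig is used purely as a convenient stand-in for
# "any" encoder/decoder configuration.
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()        # default values are fine for illustration
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
# The decoder sub-config is flipped into decoder mode with cross-attention enabled.
print(config.decoder.is_decoder, config.decoder.add_cross_attention)   # True True
print(config.is_encoder_decoder)                                       # True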
transformers-main/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py """""" import gc import inspect import os import tempfile import warnings from typing import Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ...configuration_utils import PretrainedConfig from ...modeling_outputs import BaseModelOutput, Seq2SeqLMOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ..auto.configuration_auto import AutoConfig from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM from .configuration_encoder_decoder import EncoderDecoderConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'EncoderDecoderConfig' DEPRECATION_WARNING = 'Version v4.12.0 introduces a better way to train encoder-decoder models by computing the loss inside the encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if fine-tuning a model trained with versions anterior to 4.12.0. The decoder_input_ids are now created based on the labels, no need to pass them yourself anymore.' ENCODER_DECODER_START_DOCSTRING = '\n This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the\n encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via\n [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`]\n function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream\n generative task, like summarization.\n\n The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation\n tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation\n Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi\n Zhou, Wei Li, Peter J. Liu.\n\n After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models\n (see the examples for more information).\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' ENCODER_DECODER_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the\n right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.\n decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n encoder_outputs (`tuple(torch.FloatTensor)`, *optional*):\n This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor\n of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the\n decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert `decoder_input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,\n ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.\n kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:\n\n - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.\n - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.\n" def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() if decoder_start_token_id is None: raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.") shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.") shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING) class EncoderDecoderModel(PreTrainedModel): config_class = EncoderDecoderConfig base_model_prefix = 'encoder_decoder' main_input_name = 'input_ids' supports_gradient_checkpointing = True _supports_param_buffer_assignment = False def __init__(self, config: Optional[PretrainedConfig]=None, encoder: Optional[PreTrainedModel]=None, decoder: Optional[PreTrainedModel]=None): if config is None and (encoder is None or decoder is None): raise ValueError('Either a configuration or an encoder and a decoder has to be provided.') if config is None: config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config) elif not isinstance(config, self.config_class): raise ValueError(f'Config: {config} has to be of type {self.config_class}') if config.decoder.cross_attention_hidden_size is not None: if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size: raise ValueError(f"If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal to the encoder's `hidden_size`. 
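# A small numeric check of shift_tokens_right above. The token ids are arbitrary; -100 marks
# ignored label positions and is replaced by the pad id after the shift.
import torch
from transformers.models.encoder_decoder.modeling_encoder_decoder import shift_tokens_right

labels = torch.tensor([[5, 6, -100, -100]])
shifted = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2)
# The sequence is shifted one step to the right, prefixed with the start token, and the copied
# -100 is mapped back to the pad token:
print(shifted)   # tensor([[2, 5, 6, 0]])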
Got {config.decoder.cross_attention_hidden_size} for `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for `config.encoder.hidden_size`.") super().__init__(config) if encoder is None: from ..auto.modeling_auto import AutoModel encoder = AutoModel.from_config(config.encoder, attn_implementation=config._attn_implementation) if decoder is None: from ..auto.modeling_auto import AutoModelForCausalLM decoder = AutoModelForCausalLM.from_config(config.decoder, attn_implementation=config._attn_implementation) self.encoder = encoder self.decoder = decoder if self.encoder.config.to_dict() != self.config.encoder.to_dict(): logger.warning(f'Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config: {self.config.encoder}') if self.decoder.config.to_dict() != self.config.decoder.to_dict(): logger.warning(f'Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config: {self.config.decoder}') self.encoder.config = self.config.encoder self.decoder.config = self.config.decoder if self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None: self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size) if self.encoder.get_output_embeddings() is not None: raise ValueError(f'The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head') decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys()) if 'encoder_hidden_states' not in decoder_signature: raise ValueError('The selected decoder is not prepared for the encoder hidden states to be passed. Please see the following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350') self.tie_weights() def tie_weights(self): if self.config.tie_encoder_decoder: decoder_base_model_prefix = self.decoder.base_model_prefix tied_weights = self._tie_encoder_decoder_weights(self.encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix, 'encoder') self._dynamic_tied_weights_keys = tied_weights def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def get_input_embeddings(self): return self.encoder.get_input_embeddings() def get_output_embeddings(self): return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): return self.decoder.set_output_embeddings(new_embeddings) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): from_tf = kwargs.pop('from_tf', False) if from_tf: from transformers import TFEncoderDecoderModel _tf_model = TFEncoderDecoderModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) config = _tf_model.config encoder = _tf_model.encoder.__class__(_tf_model.config.encoder) decoder = _tf_model.decoder.__class__(_tf_model.config.decoder) encoder(encoder.dummy_inputs) decoder(decoder.dummy_inputs) encoder_variables = {} for v in encoder.trainable_variables + encoder.non_trainable_variables: encoder_variables['/'.join(v.name.split('/')[1:])] = v decoder_variables = {} for v in decoder.trainable_variables + decoder.non_trainable_variables: decoder_variables['/'.join(v.name.split('/')[1:])] = v _encoder_variables = {} for v in _tf_model.encoder.trainable_variables + _tf_model.encoder.non_trainable_variables: _encoder_variables['/'.join(v.name.split('/')[2:])] = v _decoder_variables = {} for v in _tf_model.decoder.trainable_variables + 
_tf_model.decoder.non_trainable_variables: _decoder_variables['/'.join(v.name.split('/')[2:])] = v for (name, v) in encoder_variables.items(): v.assign(_encoder_variables[name]) for (name, v) in decoder_variables.items(): v.assign(_decoder_variables[name]) tf_model = TFEncoderDecoderModel(encoder=encoder, decoder=decoder) if hasattr(_tf_model, 'enc_to_dec_proj'): tf_model(tf_model.dummy_inputs) tf_model.enc_to_dec_proj.kernel.assign(_tf_model.enc_to_dec_proj.kernel) tf_model.enc_to_dec_proj.bias.assign(_tf_model.enc_to_dec_proj.bias) with tempfile.TemporaryDirectory() as tmpdirname: encoder_dir = os.path.join(tmpdirname, 'encoder') decoder_dir = os.path.join(tmpdirname, 'decoder') tf_model.encoder.save_pretrained(encoder_dir) tf_model.decoder.save_pretrained(decoder_dir) if hasattr(tf_model, 'enc_to_dec_proj'): enc_to_dec_proj_weight = torch.transpose(torch.from_numpy(tf_model.enc_to_dec_proj.kernel.numpy()), 1, 0) enc_to_dec_proj_bias = torch.from_numpy(tf_model.enc_to_dec_proj.bias.numpy()) del _tf_model del tf_model gc.collect() model = EncoderDecoderModel.from_encoder_decoder_pretrained(encoder_dir, decoder_dir, encoder_from_tf=True, decoder_from_tf=True) model.config = config if hasattr(model, 'enc_to_dec_proj'): model.enc_to_dec_proj.weight.data = enc_to_dec_proj_weight.contiguous() model.enc_to_dec_proj.bias.data = enc_to_dec_proj_bias.contiguous() return model if kwargs.get('_fast_init', False): logger.warning('Fast initialization is currently not supported for EncoderDecoderModel. Falling back to slow initialization...') kwargs['_fast_init'] = False return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) @classmethod def from_encoder_decoder_pretrained(cls, encoder_pretrained_model_name_or_path: str=None, decoder_pretrained_model_name_or_path: str=None, *model_args, **kwargs) -> PreTrainedModel: kwargs_encoder = {argument[len('encoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('encoder_')} kwargs_decoder = {argument[len('decoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('decoder_')} for key in kwargs_encoder.keys(): del kwargs['encoder_' + key] for key in kwargs_decoder.keys(): del kwargs['decoder_' + key] encoder = kwargs_encoder.pop('model', None) if encoder is None: if encoder_pretrained_model_name_or_path is None: raise ValueError('If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_encoder: (encoder_config, kwargs_encoder) = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info(f'Initializing {encoder_pretrained_model_name_or_path} as a encoder model from a decoder model. 
Cross-attention and casual mask are disabled.') encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_encoder['config'] = encoder_config encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder) decoder = kwargs_decoder.pop('model', None) if decoder is None: if decoder_pretrained_model_name_or_path is None: raise ValueError('If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_decoder: (decoder_config, kwargs_decoder) = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True) if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: logger.info(f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers.") decoder_config.is_decoder = True decoder_config.add_cross_attention = True kwargs_decoder['config'] = decoder_config if kwargs_decoder['config'].is_decoder is False or kwargs_decoder['config'].add_cross_attention is False: logger.warning(f'Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`') decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs) return cls(encoder=encoder, decoder=decoder, config=config) @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, encoder_outputs: Optional[Tuple[torch.FloatTensor]]=None, past_key_values: Tuple[Tuple[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple, Seq2SeqLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict kwargs_encoder = {argument: value for (argument, value) in kwargs.items() if not argument.startswith('decoder_')} kwargs_decoder = {argument[len('decoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('decoder_')} if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs_encoder) elif isinstance(encoder_outputs, tuple): encoder_outputs = BaseModelOutput(*encoder_outputs) encoder_hidden_states = encoder_outputs[0] if 
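# A hedged usage sketch for from_encoder_decoder_pretrained above. "bert-base-uncased" is just a
# familiar public checkpoint used for illustration; any autoencoding encoder and any causal-LM
# capable decoder checkpoint would do, and the downloads make this example network-dependent.
import torch
from transformers import BertTokenizer, EncoderDecoderModel

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "bert-base-uncased", "bert-base-uncased"     # encoder checkpoint, decoder checkpoint
)
# Training and generation need these special token ids on the top-level config:
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id

inputs = tokenizer("A short input sentence.", return_tensors="pt")
labels = tokenizer("A short target sentence.", return_tensors="pt").input_ids
outputs = model(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, labels=labels)
print(outputs.loss)   # decoder_input_ids are derived from labels internally via shift_tokens_right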
self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None: encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) if labels is not None and (decoder_input_ids is None and decoder_inputs_embeds is None): decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.new_tensor(decoder_input_ids != self.config.pad_token_id) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, past_key_values=past_key_values, return_dict=return_dict, **kwargs_decoder) loss = None if labels is not None: warnings.warn(DEPRECATION_WARNING, FutureWarning) logits = decoder_outputs.logits if return_dict else decoder_outputs[0] loss_fct = CrossEntropyLoss() loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1)) if not return_dict: if loss is not None: return (loss,) + decoder_outputs + encoder_outputs else: return decoder_outputs + encoder_outputs return Seq2SeqLMOutput(loss=loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs): decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values) decoder_attention_mask = decoder_inputs['attention_mask'] if 'attention_mask' in decoder_inputs else None input_dict = {'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'decoder_input_ids': decoder_inputs['input_ids'], 'encoder_outputs': encoder_outputs, 'past_key_values': decoder_inputs['past_key_values'], 'use_cache': use_cache} return input_dict def resize_token_embeddings(self, *args, **kwargs): raise NotImplementedError('Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) 
or model.decoder.resize_token_embeddings(...))') def _reorder_cache(self, past_key_values, beam_idx): return self.decoder._reorder_cache(past_key_values, beam_idx) # File: transformers-main/src/transformers/models/encoder_decoder/modeling_flax_encoder_decoder.py """""" import os from typing import Optional, Tuple, Union import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from jax.random import PRNGKey from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput from ...modeling_flax_utils import FlaxPreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ..auto.configuration_auto import AutoConfig from ..auto.modeling_flax_auto import FlaxAutoModel, FlaxAutoModelForCausalLM from .configuration_encoder_decoder import EncoderDecoderConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'EncoderDecoderConfig' ENCODER_DECODER_START_DOCSTRING = '\n This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the\n encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via\n [`~AutoModel.from_pretrained`] function and the decoder is loaded via [`~AutoModelForCausalLM.from_pretrained`]\n function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream\n generative task, like summarization.\n\n The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation\n tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation\n Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi\n Zhou, Wei Li, Peter J. Liu.\n\n After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models\n (see the examples for more information).\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a Flax Linen\n [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a\n regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.\n\n Parameters:\n config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' ENCODER_DECODER_INPUTS_DOCSTRING = '\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be\n created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id`\n and prepending them with the `decoder_start_token_id`.\n decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.encoder.max_position_embeddings - 1]`.\n decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.decoder.max_position_embeddings - 1]`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n If set to `True`, the model will return a [`~utils.FlaxSeq2SeqLMOutput`] instead of a plain tuple.\n' ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING = '\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.encoder.max_position_embeddings - 1]`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n If set to `True`, the model will return a [`~utils.FlaxBaseModelOutput`] instead of a plain tuple.\n' ENCODER_DECODER_DECODE_INPUTS_DOCSTRING = '\n Args:\n decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n For sequence to sequence training, `decoder_input_ids` should be provided. `decoder_input_ids` should be\n created outside of the model by shifting the `labels` to the right, replacing -100 by the `pad_token_id`\n and prepending them with the `decoder_start_token_id`.\n encoder_outputs (`tuple(tuple(jnp.ndarray)`):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.decoder.max_position_embeddings - 1]`.\n past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):\n Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast\n auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n If set to `True`, the model will return a [`~utils.FlaxCausalLMOutputWithCrossAttentions`] instead of a\n plain tuple.\n' class FlaxEncoderDecoderModule(nn.Module): config: EncoderDecoderConfig dtype: jnp.dtype = jnp.float32 def setup(self): encoder_config = self.config.encoder decoder_config = self.config.decoder from ...models.auto.modeling_flax_auto import FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_MAPPING encoder_module = FLAX_MODEL_MAPPING[encoder_config.__class__].module_class decoder_module = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING[decoder_config.__class__].module_class self.encoder = encoder_module(encoder_config, dtype=self.dtype) self.decoder = decoder_module(decoder_config, dtype=self.dtype) if self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None: self.enc_to_dec_proj = nn.Dense(self.decoder.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.decoder.config.initializer_range), dtype=self.dtype) else: self.enc_to_dec_proj = None def _get_encoder_module(self): return self.encoder def _get_projection_module(self): return self.enc_to_dec_proj def _get_decoder_module(self): return self.decoder def __call__(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True): encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) encoder_hidden_states = encoder_outputs[0] if self.enc_to_dec_proj is not None: encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqLMOutput(logits=decoder_outputs.logits, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING) class FlaxEncoderDecoderModel(FlaxPreTrainedModel): config_class = EncoderDecoderConfig base_model_prefix = 'encoder_decoder' module_class = FlaxEncoderDecoderModule def __init__(self, config: EncoderDecoderConfig, input_shape: Optional[Tuple]=None, seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): if input_shape is None: input_shape = ((1, 1), (1, 1)) if not _do_init: raise ValueError('`FlaxEncoderDecoderModel` cannot be created without initializing, `_do_init` must be `True`.') if config.decoder.cross_attention_hidden_size is not None: if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size: raise ValueError(f"If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal to the encoder's 
`hidden_size`. Got {config.decoder.cross_attention_hidden_size} for `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for `config.encoder.hidden_size`.") module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: (encoder_input_shape, decoder_input_shape) = input_shape input_ids = jnp.zeros(encoder_input_shape, dtype='i4') attention_mask = jnp.ones_like(input_ids) decoder_input_ids = jnp.zeros(decoder_input_shape, dtype='i4') decoder_attention_mask = jnp.ones_like(decoder_input_ids) (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) (decoder_batch_size, decoder_sequence_length) = decoder_input_ids.shape if not decoder_batch_size == batch_size: raise ValueError(f'The inputs of encoder and decoder should have the same batch size, but got {batch_size} for encoder and {decoder_batch_size} for decoder.') decoder_position_ids = jnp.broadcast_to(jnp.arange(decoder_sequence_length)[None, :], (decoder_batch_size, decoder_sequence_length)) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length, encoder_outputs): decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4') decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, **kwargs) init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward) return unfreeze(init_variables['cache']) @add_start_docstrings(ENCODER_DECODER_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=_CONFIG_FOR_DOC) def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: 
attention_mask = jnp.ones_like(input_ids) if position_ids is None: (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, position_ids, **kwargs) outputs = self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward) if return_dict: outputs = FlaxBaseModelOutput(last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions) return outputs @add_start_docstrings(ENCODER_DECODER_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: dict=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: (batch_size, sequence_length) = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) (batch_size, sequence_length) = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states, **kwargs): projection_module = module._get_projection_module() decoder_module = module._get_decoder_module() if projection_module is not None: encoder_hidden_states = projection_module(encoder_hidden_states) return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, encoder_hidden_states=encoder_hidden_states, **kwargs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, 
encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is not None and return_dict: (outputs, past) = outputs outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past) = outputs outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def __call__(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, decoder_input_ids: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if position_ids is None: (batch_size, sequence_length) = input_ids.shape position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if decoder_input_ids is None: raise ValueError('`decoder_input_ids` cannot be `None`. 
For sequence to sequence training, `decoder_position_ids` must be specified as an input argument.') if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) if decoder_position_ids is None: (batch_size, sequence_length) = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {'dropout': dropout_rng} if dropout_rng is not None else {} return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs) def prepare_inputs_for_generation(self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array]=None, decoder_attention_mask: Optional[jax.Array]=None, encoder_outputs=None, **kwargs): (batch_size, seq_length) = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if decoder_attention_mask is not None: decoder_position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: decoder_position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'encoder_outputs': encoder_outputs, 'encoder_attention_mask': attention_mask, 'decoder_attention_mask': extended_attention_mask, 'decoder_position_ids': decoder_position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['decoder_position_ids'] = model_kwargs['decoder_position_ids'][:, -1:] + 1 return model_kwargs @classmethod def from_encoder_decoder_pretrained(cls, encoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]]=None, decoder_pretrained_model_name_or_path: Optional[Union[str, os.PathLike]]=None, *model_args, **kwargs) -> FlaxPreTrainedModel: kwargs_encoder = {argument[len('encoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('encoder_')} kwargs_decoder = {argument[len('decoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('decoder_')} for key in kwargs_encoder.keys(): del kwargs['encoder_' + key] for key in kwargs_decoder.keys(): del kwargs['decoder_' + key] encoder = kwargs_encoder.pop('model', None) if encoder is None: if encoder_pretrained_model_name_or_path is None: raise ValueError('If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_encoder: (encoder_config, kwargs_encoder) = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info(f'Initializing {encoder_pretrained_model_name_or_path} as a encoder model from a decoder model. 
Cross-attention and casual mask are disabled.') encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_encoder['config'] = encoder_config encoder = FlaxAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder) decoder = kwargs_decoder.pop('model', None) if decoder is None: if decoder_pretrained_model_name_or_path is None: raise ValueError('If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_decoder: (decoder_config, kwargs_decoder) = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True) if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: logger.info(f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers.") decoder_config.is_decoder = True decoder_config.add_cross_attention = True kwargs_decoder['config'] = decoder_config if kwargs_decoder['config'].is_decoder is False or kwargs_decoder['config'].add_cross_attention is False: logger.warning(f'Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`') decoder = FlaxAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) dtype = kwargs.pop('dtype', jnp.float32) config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs) model = cls(config, dtype=dtype) model.params['encoder'] = encoder.params model.params['decoder'] = decoder.params return model # File: transformers-main/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py """""" from __future__ import annotations import inspect import re import warnings from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...configuration_utils import PretrainedConfig from ...modeling_tf_outputs import TFBaseModelOutput, TFSeq2SeqLMOutput from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, get_initializer, keras, unpack_inputs from ...tf_utils import shape_list from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ..auto.configuration_auto import AutoConfig from ..auto.modeling_tf_auto import TFAutoModel, TFAutoModelForCausalLM from .configuration_encoder_decoder import EncoderDecoderConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'EncoderDecoderConfig' DEPRECATION_WARNING = 'Version v4.17.0 introduces a better way to train encoder-decoder models by computing the loss inside the encoder-decoder framework rather than in the decoder itself. You may observe training discrepancies if fine-tuning a model trained with versions anterior to 4.17.0. The decoder_input_ids are now created based on the labels, no need to pass them yourself anymore.' 
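# ----------------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the upstream file): the
# DEPRECATION_WARNING above says that `decoder_input_ids` are now derived from
# `labels`. The rule it refers to is the one implemented by `shift_tokens_right`
# further down in this file: prepend `decoder_start_token_id`, drop the last label,
# and replace the `-100` ignore-index positions with `pad_token_id`. The helper name
# and token ids below are made up purely for illustration; the real function operates
# on `tf.Tensor`s and additionally asserts that the result contains no negative ids.
def _editor_sketch_shift_tokens_right(labels, pad_token_id, decoder_start_token_id):
    # Shift right: start token first, last label dropped.
    shifted = [decoder_start_token_id] + list(labels[:-1])
    # -100 marks ignored label positions; they become pad tokens in the decoder input.
    return [pad_token_id if tok == -100 else tok for tok in shifted]

# Toy values for illustration only: pad_token_id=0, decoder_start_token_id=101.
assert _editor_sketch_shift_tokens_right([7, 8, -100, -100], 0, 101) == [101, 7, 8, 0]
# ----------------------------------------------------------------------------------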
ENCODER_DECODER_START_DOCSTRING = '\n This class can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the\n encoder and any pretrained autoregressive model as the decoder. The encoder is loaded via\n [`~TFAutoModel.from_pretrained`] function and the decoder is loaded via [`~TFAutoModelForCausalLM.from_pretrained`]\n function. Cross-attention layers are automatically added to the decoder and should be fine-tuned on a downstream\n generative task, like summarization.\n\n The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation\n tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation\n Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi\n Zhou, Wei Li, Peter J. Liu.\n\n After such an Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other models\n (see the examples for more information).\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`EncoderDecoderConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' ENCODER_DECODER_INPUTS_DOCSTRING = "\n Args:\n input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n Provide for sequence to sequence training to the decoder. Indices can be obtained using\n [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for\n details.\n decoder_attention_mask (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. 
Causal mask will also\n be used by default.\n encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*):\n This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` (`tf.Tensor` of shape `({0}, hidden_size)`) is a tensor of hidden-states at the output\n of the last layer of the encoder. Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(tf.Tensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `({0})`.\n inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n decoder_inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert `decoder_input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n labels (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,\n ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n kwargs (*optional*): Remaining dictionary of keyword arguments. 
Keyword arguments come in two flavors:\n\n - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.\n - With a *decoder_* prefix which will be input as `**decoder_kwargs`` for the decoder forward function.\n" def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): if pad_token_id is None: raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.") pad_token_id = tf.cast(pad_token_id, input_ids.dtype) if decoder_start_token_id is None: raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.") decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) shifted_input_ids = tf.where(shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids) assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids @add_start_docstrings(ENCODER_DECODER_START_DOCSTRING) class TFEncoderDecoderModel(TFPreTrainedModel, TFCausalLanguageModelingLoss): config_class = EncoderDecoderConfig base_model_prefix = 'encoder_decoder' load_weight_prefix = 'tf_encoder_decoder_model' def __init__(self, config: Optional[PretrainedConfig]=None, encoder: Optional[TFPreTrainedModel]=None, decoder: Optional[TFPreTrainedModel]=None): if config is None and (encoder is None or decoder is None): raise ValueError('Either a configuration or an encoder and a decoder has to be provided.') if config is None: config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config) elif not isinstance(config, self.config_class): raise ValueError(f'config: {config} has to be of type {self.config_class}') if config.decoder.cross_attention_hidden_size is not None: if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size: raise ValueError(f"If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal to the encoder's `hidden_size`. 
Got {config.decoder.cross_attention_hidden_size} for `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for `config.encoder.hidden_size`.") super().__init__(config) if encoder is None: encoder = TFAutoModel.from_config(config.encoder, name='encoder') if decoder is None: decoder = TFAutoModelForCausalLM.from_config(config.decoder, name='decoder') self.encoder = encoder self.decoder = decoder if self.encoder.config.to_dict() != self.config.encoder.to_dict(): logger.warning(f'Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config: {self.config.encoder}') if self.decoder.config.to_dict() != self.config.decoder.to_dict(): logger.warning(f'Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config: {self.config.decoder}') self.encoder.config = self.config.encoder self.decoder.config = self.config.decoder if self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None: self.enc_to_dec_proj = keras.layers.Dense(units=self.decoder.config.hidden_size, kernel_initializer=get_initializer(config.encoder.initializer_range), name='enc_to_dec_proj') if self.encoder.get_output_embeddings() is not None: raise ValueError(f'The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head') decoder_signature = set(inspect.signature(self.decoder.call).parameters.keys()) if 'encoder_hidden_states' not in decoder_signature: raise ValueError('The selected decoder is not prepared for the encoder hidden states to be passed. Please see the following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350') def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def get_input_embeddings(self): return self.encoder.get_input_embeddings() def get_output_embeddings(self): return self.decoder.get_output_embeddings() def set_output_embeddings(self, new_embeddings): return self.decoder.set_output_embeddings(new_embeddings) def tf_to_pt_weight_rename(self, tf_weight): encoder_model_type = self.config.encoder.model_type if 'encoder' in tf_weight and 'decoder' not in tf_weight: return (re.sub(f'encoder\\.{encoder_model_type}\\.', 'encoder.', tf_weight),) else: return (tf_weight,) @classmethod def from_encoder_decoder_pretrained(cls, encoder_pretrained_model_name_or_path: str=None, decoder_pretrained_model_name_or_path: str=None, *model_args, **kwargs) -> TFPreTrainedModel: kwargs_encoder = {argument[len('encoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('encoder_')} kwargs_decoder = {argument[len('decoder_'):]: value for (argument, value) in kwargs.items() if argument.startswith('decoder_')} for key in kwargs_encoder.keys(): del kwargs['encoder_' + key] for key in kwargs_decoder.keys(): del kwargs['decoder_' + key] encoder = kwargs_encoder.pop('model', None) if encoder is None: if encoder_pretrained_model_name_or_path is None: raise ValueError('If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_encoder: encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: logger.info(f'Initializing {encoder_pretrained_model_name_or_path} as a encoder model from a decoder model. 
Cross-attention and casual mask are disabled.') encoder_config.is_decoder = False encoder_config.add_cross_attention = False kwargs_encoder['config'] = encoder_config kwargs_encoder['name'] = 'encoder' kwargs_encoder['load_weight_prefix'] = cls.load_weight_prefix encoder = TFAutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder) decoder = kwargs_decoder.pop('model', None) if decoder is None: if decoder_pretrained_model_name_or_path is None: raise ValueError('If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined.') if 'config' not in kwargs_decoder: decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path) if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: logger.info(f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers.") decoder_config.is_decoder = True decoder_config.add_cross_attention = True kwargs_decoder['config'] = decoder_config if kwargs_decoder['config'].is_decoder is False or kwargs_decoder['config'].add_cross_attention is False: logger.warning(f'Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`') kwargs_decoder['name'] = 'decoder' kwargs_decoder['load_weight_prefix'] = cls.load_weight_prefix decoder = TFAutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder) if encoder.name != 'encoder': raise ValueError('encoder model must be created with the name `encoder`.') if decoder.name != 'decoder': raise ValueError('decoder model must be created with the name `decoder`.') config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs) return cls(encoder=encoder, decoder=decoder, config=config) @unpack_inputs @add_start_docstrings_to_model_forward(ENCODER_DECODER_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, decoder_input_ids: np.ndarray | tf.Tensor | None=None, decoder_attention_mask: np.ndarray | tf.Tensor | None=None, encoder_outputs: np.ndarray | tf.Tensor | None=None, past_key_values: Tuple[Tuple[tf.Tensor]] | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, decoder_inputs_embeds: np.ndarray | tf.Tensor | None=None, labels: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False, **kwargs) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict kwargs_encoder = {argument: value for (argument, value) in kwargs.items() if not argument.startswith('decoder_')} kwargs_decoder = {argument[len('decoder_'):]: value for (argument, value) in kwargs.items() if 
argument.startswith('decoder_')} if encoder_outputs is not None: if return_dict and (not isinstance(encoder_outputs, ModelOutput)): raise ValueError(f'If `return_dict=True` and `encoder_outputs` is provided, it should be an instance of `ModelOutput`. Got an instance {type(encoder_outputs)} for `encoder_outputs`.') if encoder_outputs is None: encoder_inputs = {'input_ids': input_ids, 'attention_mask': attention_mask, 'inputs_embeds': inputs_embeds, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'return_dict': return_dict, 'training': training} encoder_inputs.update(kwargs_encoder) if 'labels' in encoder_inputs: labels = encoder_inputs.pop('labels') if 'decoder_input_ids' in encoder_inputs: decoder_input_ids = encoder_inputs.pop('decoder_input_ids') if 'decoder_attention_mask' in encoder_inputs: decoder_attention_mask = encoder_inputs.pop('decoder_attention_mask') encoder_outputs = self.encoder(**encoder_inputs) encoder_hidden_states = encoder_outputs[0] if self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None: encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states) if labels is not None and (decoder_input_ids is None and decoder_inputs_embeds is None): decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) decoder_inputs = {'input_ids': decoder_input_ids, 'attention_mask': decoder_attention_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': attention_mask, 'inputs_embeds': decoder_inputs_embeds, 'output_attentions': output_attentions, 'output_hidden_states': output_hidden_states, 'use_cache': use_cache, 'past_key_values': past_key_values, 'return_dict': return_dict, 'training': training} decoder_inputs.update(kwargs_decoder) decoder_outputs = self.decoder(**decoder_inputs) logits = decoder_outputs[0] loss = None if labels is not None: warnings.warn(DEPRECATION_WARNING, FutureWarning) loss = self.hf_compute_loss(labels, logits) if not return_dict: past_key_values = None if use_cache: past_key_values = decoder_outputs[1] start_index = sum([1 if x is not None else 0 for x in (loss, logits, past_key_values)]) if not isinstance(encoder_outputs, tuple): encoder_outputs = encoder_outputs.to_tuple() output = (loss, logits, past_key_values) + decoder_outputs[start_index:] + encoder_outputs output = tuple([x for x in output if x is not None]) return output return TFSeq2SeqLMOutput(loss=loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs): decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values) decoder_attention_mask = decoder_inputs['attention_mask'] if 'attention_mask' in decoder_inputs else None past_key_values = decoder_inputs.get('past_key_values') if past_key_values is None: past_key_values = decoder_inputs.get('past') input_dict = {'input_ids': None, 'attention_mask': attention_mask, 'decoder_attention_mask': decoder_attention_mask, 'decoder_input_ids': 
decoder_inputs['input_ids'], 'encoder_outputs': TFBaseModelOutput(last_hidden_state=encoder_outputs[0]), 'past_key_values': past_key_values, 'use_cache': use_cache} return input_dict def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) def resize_token_embeddings(self, *args, **kwargs): raise NotImplementedError('Resizing the embedding layers via the TFEncoderDecoderModel directly is not supported.Please use the respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or model.decoder.resize_token_embeddings(...))') def _reorder_cache(self, past, beam_idx): return self.decoder._reorder_cache(past, beam_idx) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'enc_to_dec_proj', None) is not None: with tf.name_scope(self.enc_to_dec_proj.name): self.enc_to_dec_proj.build([None, None, self.encoder.config.hidden_size]) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'decoder', None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) # File: transformers-main/src/transformers/models/ernie/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available _import_structure = {'configuration_ernie': ['ErnieConfig', 'ErnieOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_ernie'] = ['ErnieForCausalLM', 'ErnieForMaskedLM', 'ErnieForMultipleChoice', 'ErnieForNextSentencePrediction', 'ErnieForPreTraining', 'ErnieForQuestionAnswering', 'ErnieForSequenceClassification', 'ErnieForTokenClassification', 'ErnieModel', 'ErniePreTrainedModel'] if TYPE_CHECKING: from .configuration_ernie import ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/ernie/configuration_ernie.py """""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class ErnieConfig(PretrainedConfig): model_type = 'ernie' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, task_type_vocab_size=3, use_task_id=False, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = 
hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.task_type_vocab_size = task_type_vocab_size self.use_task_id = use_task_id self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout class ErnieOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ('task_type_ids', dynamic_axis)]) # File: transformers-main/src/transformers/models/ernie/modeling_ernie.py """""" import math import warnings from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_ernie import ErnieConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'nghuyong/ernie-1.0-base-zh' _CONFIG_FOR_DOC = 'ErnieConfig' class ErnieEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.use_task_id = config.use_task_id if config.use_task_id: self.task_type_embeddings = nn.Embedding(config.task_type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, task_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, 
past_key_values_length:seq_length + past_key_values_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings if self.use_task_id: if task_type_ids is None: task_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) task_type_embeddings = self.task_type_embeddings(task_type_ids) embeddings += task_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class ErnieSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) 
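# Editor's note: this `elif past_key_value is not None` branch handles cached
# self-attention during incremental decoding. The key/value projections of the new
# tokens are concatenated below onto the cached ones along the sequence axis
# (dim=2 of the (batch, num_heads, seq_len, head_dim) layout produced by
# `transpose_for_scores`).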
key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2]) if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class ErnieSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states ERNIE_SELF_ATTENTION_CLASSES = {'eager': ErnieSelfAttention} class ErnieAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = ERNIE_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type) self.output = ErnieSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, 
self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class ErnieIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class ErnieOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class ErnieLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ErnieAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = ErnieAttention(config, position_embedding_type='absolute') self.intermediate = ErnieIntermediate(config) self.output = ErnieOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] 
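# Editor's note: for decoder layers the self-attention outputs carry the present
# (key, value) cache as their last element; it is split off from the attention
# weights here and re-attached to `outputs` at the end of the layer's forward pass.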
present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class ErnieEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ErnieLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class ErniePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class ErniePredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class ErnieLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = ErniePredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class ErnieOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = ErnieLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class ErnieOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return 
seq_relationship_score class ErniePreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = ErnieLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return (prediction_scores, seq_relationship_score) class ErniePreTrainedModel(PreTrainedModel): config_class = ErnieConfig base_model_prefix = 'ernie' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class ErnieForPreTrainingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None seq_relationship_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ERNIE_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`ErnieConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' ERNIE_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n task_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Task type embedding is a special embedding to represent the characteristic of different tasks, such as\n word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. 
We\n assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,\n config.task_type_vocab_size-1]\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare Ernie Model transformer outputting raw hidden-states without any specific head on top.', ERNIE_START_DOCSTRING) class ErnieModel(ErniePreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = ErnieEmbeddings(config) self.encoder = ErnieEncoder(config) self.pooler = ErniePooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None 
else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, task_type_ids=task_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('\n Ernie Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n ', ERNIE_START_DOCSTRING) class ErnieForPreTraining(ErniePreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder.bias', 'cls.predictions.decoder.weight'] def __init__(self, config): super().__init__(config) self.ernie = ErnieModel(config) self.cls = ErniePreTrainingHeads(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings 
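# NOTE (editor): hedged usage sketch, not part of the original modeling_ernie.py. It exercises the bare
# `ErnieModel` forward documented above; the checkpoint name is only an example of a public ERNIE checkpoint.
import torch
from transformers import AutoTokenizer, ErnieModel

tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-2.0-base-en")
model = ErnieModel.from_pretrained("nghuyong/ernie-2.0-base-en")
inputs = tokenizer("ERNIE produces sequence and pooled outputs.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
print(outputs.pooler_output.shape)      # (batch_size, hidden_size)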
self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=ErnieForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, next_sentence_label: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], ErnieForPreTrainingOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) (sequence_output, pooled_output) = outputs[:2] (prediction_scores, seq_relationship_score) = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return ErnieForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('Ernie Model with a `language modeling` head on top for CLM fine-tuning.', ERNIE_START_DOCSTRING) class ErnieForCausalLM(ErniePreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder.bias', 'cls.predictions.decoder.weight'] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning('If you want to use `ErnieForCausalLM` as a standalone, add `is_decoder=True.`') self.ernie = ErnieModel(config, add_pooling_layer=False) self.cls = ErnieOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: 
Optional[List[torch.Tensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=True, **model_kwargs): input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) if past_key_values is not None: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] return {'input_ids': input_ids, 'attention_mask': attention_mask, 'past_key_values': past_key_values, 'use_cache': use_cache} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past @add_start_docstrings('Ernie Model with a `language modeling` head on top.', ERNIE_START_DOCSTRING) class ErnieForMaskedLM(ErniePreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder.bias', 'cls.predictions.decoder.weight'] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning('If you want to use `ErnieForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.') self.ernie = ErnieModel(config, add_pooling_layer=False) self.cls = ErnieOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, expected_output="'paris'", expected_loss=0.88) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: 
Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] if self.config.pad_token_id is None: raise ValueError('The PAD token should be defined for generation') attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full((effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {'input_ids': input_ids, 'attention_mask': attention_mask} @add_start_docstrings('Ernie Model with a `next sentence prediction (classification)` head on top.', ERNIE_START_DOCSTRING) class ErnieForNextSentencePrediction(ErniePreTrainedModel): def __init__(self, config): super().__init__(config) self.ernie = ErnieModel(config) self.cls = ErnieOnlyNSPHead(config) self.post_init() @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]: if 'next_sentence_label' in kwargs: warnings.warn('The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.', FutureWarning) labels = kwargs.pop('next_sentence_label') return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ernie(input_ids, 
attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return (next_sentence_loss,) + output if next_sentence_loss is not None else output return NextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Ernie Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', ERNIE_START_DOCSTRING) class ErnieForSequenceClassification(ErniePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.ernie = ErnieModel(config) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return 
SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Ernie Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n ', ERNIE_START_DOCSTRING) class ErnieForMultipleChoice(ErniePreTrainedModel): def __init__(self, config): super().__init__(config) self.ernie = ErnieModel(config) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Ernie Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', ERNIE_START_DOCSTRING) class ErnieForTokenClassification(ErniePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.ernie = ErnieModel(config, add_pooling_layer=False) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Ernie Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', ERNIE_START_DOCSTRING) class ErnieForQuestionAnswering(ErniePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.ernie = ErnieModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, task_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ernie(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task_type_ids=task_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, 
output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/esm/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _import_structure = {'configuration_esm': ['EsmConfig'], 'tokenization_esm': ['EsmTokenizer']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_esm'] = ['EsmForMaskedLM', 'EsmForSequenceClassification', 'EsmForTokenClassification', 'EsmModel', 'EsmPreTrainedModel'] _import_structure['modeling_esmfold'] = ['EsmForProteinFolding', 'EsmFoldPreTrainedModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_esm'] = ['TFEsmForMaskedLM', 'TFEsmForSequenceClassification', 'TFEsmForTokenClassification', 'TFEsmModel', 'TFEsmPreTrainedModel'] if TYPE_CHECKING: from .configuration_esm import EsmConfig from .tokenization_esm import EsmTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_esm import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel, EsmPreTrainedModel from .modeling_esmfold import EsmFoldPreTrainedModel, EsmForProteinFolding try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_esm import TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, TFEsmPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) # File: transformers-main/src/transformers/models/esm/configuration_esm.py """""" from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class EsmConfig(PretrainedConfig): model_type = 'esm' def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, 
initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type='absolute', use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs): super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.emb_layer_norm_before = emb_layer_norm_before self.token_dropout = token_dropout self.is_folding_model = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('No esmfold_config supplied for folding model, using default values.') esmfold_config = EsmFoldConfig() elif isinstance(esmfold_config, dict): esmfold_config = EsmFoldConfig(**esmfold_config) self.esmfold_config = esmfold_config if vocab_list is None: logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!') self.vocab_list = get_default_vocab_list() else: self.vocab_list = vocab_list else: self.esmfold_config = None self.vocab_list = None if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False): raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!') def to_dict(self): output = super().to_dict() if isinstance(self.esmfold_config, EsmFoldConfig): output['esmfold_config'] = self.esmfold_config.to_dict() return output @dataclass class EsmFoldConfig: esm_type: str = None fp16_esm: bool = True use_esm_attn_map: bool = False esm_ablate_pairwise: bool = False esm_ablate_sequence: bool = False esm_input_dropout: float = 0 embed_aa: bool = True bypass_lm: bool = False lddt_head_hid_dim: int = 128 trunk: 'TrunkConfig' = None def __post_init__(self): if self.trunk is None: self.trunk = TrunkConfig() elif isinstance(self.trunk, dict): self.trunk = TrunkConfig(**self.trunk) def to_dict(self): output = asdict(self) output['trunk'] = self.trunk.to_dict() return output @dataclass class TrunkConfig: num_blocks: int = 48 sequence_state_dim: int = 1024 pairwise_state_dim: int = 128 sequence_head_width: int = 32 pairwise_head_width: int = 32 position_bins: int = 32 dropout: float = 0 layer_drop: float = 0 cpu_grad_checkpoint: bool = False max_recycles: int = 4 chunk_size: Optional[int] = 128 structure_module: 'StructureModuleConfig' = None def __post_init__(self): if self.structure_module is None: self.structure_module = StructureModuleConfig() elif isinstance(self.structure_module, dict): self.structure_module = StructureModuleConfig(**self.structure_module) if self.max_recycles <= 0: raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.') if self.sequence_state_dim % self.sequence_head_width != 0: raise ValueError(f'`sequence_state_dim` should be a round multiple of `sequence_head_width`, got {self.sequence_state_dim} and {self.sequence_head_width}.') if self.pairwise_state_dim % self.pairwise_head_width != 0: raise ValueError(f'`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got {self.pairwise_state_dim} and {self.pairwise_head_width}.') sequence_num_heads 
= self.sequence_state_dim // self.sequence_head_width pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError(f'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.') if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError(f'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.') if self.pairwise_state_dim % 2 != 0: raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.') if self.dropout >= 0.4: raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.') def to_dict(self): output = asdict(self) output['structure_module'] = self.structure_module.to_dict() return output @dataclass class StructureModuleConfig: sequence_dim: int = 384 pairwise_dim: int = 128 ipa_dim: int = 16 resnet_dim: int = 128 num_heads_ipa: int = 12 num_qk_points: int = 4 num_v_points: int = 8 dropout_rate: float = 0.1 num_blocks: int = 8 num_transition_layers: int = 1 num_resnet_blocks: int = 2 num_angles: int = 7 trans_scale_factor: int = 10 epsilon: float = 1e-08 inf: float = 100000.0 def to_dict(self): return asdict(self) def get_default_vocab_list(): return ('', '', '', '', 'L', 'A', 'G', 'V', 'S', 'E', 'R', 'T', 'I', 'D', 'P', 'K', 'Q', 'N', 'F', 'Y', 'M', 'H', 'W', 'C', 'X', 'B', 'U', 'Z', 'O', '.', '-', '', '') # File: transformers-main/src/transformers/models/esm/convert_esm.py """""" import argparse import pathlib from pathlib import Path from tempfile import TemporaryDirectory import esm as esm_module import torch from esm.esmfold.v1.misc import batch_encode_sequences as esmfold_encode_sequences from esm.esmfold.v1.pretrained import esmfold_v1 from transformers.models.esm.configuration_esm import EsmConfig, EsmFoldConfig from transformers.models.esm.modeling_esm import EsmForMaskedLM, EsmForSequenceClassification, EsmIntermediate, EsmLayer, EsmOutput, EsmSelfAttention, EsmSelfOutput from transformers.models.esm.modeling_esmfold import EsmForProteinFolding from transformers.models.esm.tokenization_esm import EsmTokenizer from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) SAMPLE_DATA = [('protein1', 'MNGTEGPNFYVPFSNATGVVRSPFEYPQYYLAEPWQFSMLAAYMFLLIVLGFPINFLTLYVTVQHKKLRTPLNYILLNLAVADLFMVLGGFTSTLYTSLHGYFVFGPTGCNLEGFFATLGGEIALWSLVVLAIERYVVVCKPMSNFRFGENHAIMGVAFTWVMALACAAPPLAGWSRYIPEGLQCSCGIDYYTLKPEVNNESFVIYMFVVHFTIPMIIIFFCYGQLVFTVKEAAAQQQESATTQKAEKEVTRMVIIMVIAFLICWVPYASVAFYIFTHQGSNFGPIFMTIPAFFAKSAAIYNPVIYIMMNKQFRNCMLTTICCGKNPLGDDEASATVSKTETSQVAPA'), ('protein2', 'MKTVRQERLKSIVRILERSKEPVSGAQLAEELSVSRQVIVQDIAYLRSLGYNIVATPRGYVLA'), ('protein3', 'MKTVRQERLKSIRILERSKEPVSGAQLAEELSSRQVIVQDIAYLRSLGYNVATPRGYVLAGG'), ('protein4', 'MKTVRQERLKSIRILERSKEPVSGAQLAEELSSRQVIVQDIAYLRSLGYNVATPRGYVLA')] MODEL_MAPPING = {'esm1b_t33_650M_UR50S': esm_module.pretrained.esm1b_t33_650M_UR50S, 'esm1v_t33_650M_UR90S_1': esm_module.pretrained.esm1v_t33_650M_UR90S_1, 'esm1v_t33_650M_UR90S_2': esm_module.pretrained.esm1v_t33_650M_UR90S_2, 'esm1v_t33_650M_UR90S_3': esm_module.pretrained.esm1v_t33_650M_UR90S_3, 'esm1v_t33_650M_UR90S_4': esm_module.pretrained.esm1v_t33_650M_UR90S_4, 'esm1v_t33_650M_UR90S_5': esm_module.pretrained.esm1v_t33_650M_UR90S_5, 
'esm2_t48_15B_UR50D': esm_module.pretrained.esm2_t48_15B_UR50D, 'esm2_t36_3B_UR50D': esm_module.pretrained.esm2_t36_3B_UR50D, 'esm2_t33_650M_UR50D': esm_module.pretrained.esm2_t33_650M_UR50D, 'esm2_t30_150M_UR50D': esm_module.pretrained.esm2_t30_150M_UR50D, 'esm2_t12_35M_UR50D': esm_module.pretrained.esm2_t12_35M_UR50D, 'esm2_t6_8M_UR50D': esm_module.pretrained.esm2_t6_8M_UR50D, 'esmfold_v1': esmfold_v1} restypes = list('ARNDCQEGHILKMFPSTWYV') restypes_with_x = restypes + ['X'] restypes_with_extras = restypes_with_x + ['', '', '', '', ''] def get_esmfold_tokenizer(): with TemporaryDirectory() as tempdir: vocab = '\n'.join(restypes_with_extras) vocab_file = Path(tempdir) / 'vocab.txt' vocab_file.write_text(vocab) hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file)) hf_tokenizer.pad_token_id = 0 return hf_tokenizer def transfer_and_check_weights(original_module, our_module): status = our_module.load_state_dict(original_module.state_dict()) if status.missing_keys: raise ValueError(f'Missing keys: {status.missing_keys}') if status.unexpected_keys: raise ValueError(f'Unexpected keys: {status.unexpected_keys}') def convert_esm_checkpoint_to_pytorch(model: str, pytorch_dump_folder_path: str, classification_head: bool, push_to_repo: str, auth_token: str): if model.startswith('esmfold'): esm = MODEL_MAPPING[model]() else: (esm, alphabet) = MODEL_MAPPING[model]() esm.eval() if model.startswith('esmfold'): embed_dim = esm.esm.embed_dim num_layers = esm.esm.num_layers num_attention_heads = esm.esm.attention_heads intermediate_size = 4 * embed_dim token_dropout = esm.esm.token_dropout emb_layer_norm_before = False position_embedding_type = 'rotary' is_folding_model = True esmfold_config = EsmFoldConfig() for (key, val) in esm.cfg.items(): if hasattr(esmfold_config, key) and key != 'trunk': setattr(esmfold_config, key, val) for (key, val) in esm.cfg.trunk.items(): if hasattr(esmfold_config.trunk, key) and key != 'structure_module': setattr(esmfold_config.trunk, key, val) for (key, val) in esm.cfg.trunk.structure_module.items(): if hasattr(esmfold_config.trunk.structure_module, key): setattr(esmfold_config.trunk.structure_module, key, val) elif hasattr(esm, 'args'): embed_dim = esm.args.embed_dim num_layers = esm.args.layers num_attention_heads = esm.args.attention_heads intermediate_size = esm.args.ffn_embed_dim token_dropout = esm.args.token_dropout emb_layer_norm_before = True if esm.emb_layer_norm_before else False position_embedding_type = 'absolute' is_folding_model = False esmfold_config = None else: embed_dim = esm.embed_dim num_layers = esm.num_layers num_attention_heads = esm.attention_heads intermediate_size = 4 * embed_dim token_dropout = esm.token_dropout emb_layer_norm_before = False position_embedding_type = 'rotary' is_folding_model = False esmfold_config = None if is_folding_model: alphabet = esm.esm.alphabet vocab_list = tuple(alphabet.all_toks) mask_token_id = alphabet.mask_idx pad_token_id = alphabet.padding_idx if is_folding_model: original_esm_model = esm.esm else: original_esm_model = esm config = EsmConfig(vocab_size=original_esm_model.embed_tokens.num_embeddings, mask_token_id=mask_token_id, hidden_size=embed_dim, num_hidden_layers=num_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, max_position_embeddings=1026, layer_norm_eps=1e-05, attention_probs_dropout_prob=0.0, hidden_dropout_prob=0.0, pad_token_id=pad_token_id, emb_layer_norm_before=emb_layer_norm_before, token_dropout=token_dropout, position_embedding_type=position_embedding_type, 
is_folding_model=is_folding_model, esmfold_config=esmfold_config, vocab_list=vocab_list) if classification_head: config.num_labels = esm.classification_heads['mnli'].out_proj.weight.shape[0] print('Our ESM config:', config) if model.startswith('esmfold'): model_class = EsmForProteinFolding elif classification_head: model_class = EsmForSequenceClassification else: model_class = EsmForMaskedLM model = model_class(config) model.eval() model.esm.embeddings.word_embeddings.weight = original_esm_model.embed_tokens.weight if position_embedding_type == 'absolute': model.esm.embeddings.position_embeddings.weight = original_esm_model.embed_positions.weight if config.emb_layer_norm_before: model.esm.embeddings.layer_norm.weight = original_esm_model.emb_layer_norm_before.weight model.esm.embeddings.layer_norm.bias = original_esm_model.emb_layer_norm_before.bias model.esm.encoder.emb_layer_norm_after.weight = original_esm_model.emb_layer_norm_after.weight model.esm.encoder.emb_layer_norm_after.bias = original_esm_model.emb_layer_norm_after.bias for i in range(config.num_hidden_layers): layer: EsmLayer = model.esm.encoder.layer[i] esm_layer = original_esm_model.layers[i] self_attn: EsmSelfAttention = layer.attention.self assert esm_layer.self_attn.k_proj.weight.data.shape == esm_layer.self_attn.q_proj.weight.data.shape == esm_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)) self_attn.query.weight.data = esm_layer.self_attn.q_proj.weight self_attn.query.bias.data = esm_layer.self_attn.q_proj.bias self_attn.key.weight.data = esm_layer.self_attn.k_proj.weight self_attn.key.bias.data = esm_layer.self_attn.k_proj.bias self_attn.value.weight.data = esm_layer.self_attn.v_proj.weight self_attn.value.bias.data = esm_layer.self_attn.v_proj.bias if getattr(esm_layer.self_attn, 'rot_emb', None) is not None: self_attn.rotary_embeddings.inv_freq.data = esm_layer.self_attn.rot_emb.inv_freq layer.attention.LayerNorm.weight = esm_layer.self_attn_layer_norm.weight layer.attention.LayerNorm.bias = esm_layer.self_attn_layer_norm.bias layer.LayerNorm.weight = esm_layer.final_layer_norm.weight layer.LayerNorm.bias = esm_layer.final_layer_norm.bias self_output: EsmSelfOutput = layer.attention.output assert self_output.dense.weight.shape == esm_layer.self_attn.out_proj.weight.shape self_output.dense.weight = esm_layer.self_attn.out_proj.weight self_output.dense.bias = esm_layer.self_attn.out_proj.bias intermediate: EsmIntermediate = layer.intermediate assert intermediate.dense.weight.shape == esm_layer.fc1.weight.shape intermediate.dense.weight = esm_layer.fc1.weight intermediate.dense.bias = esm_layer.fc1.bias bert_output: EsmOutput = layer.output assert bert_output.dense.weight.shape == esm_layer.fc2.weight.shape bert_output.dense.weight = esm_layer.fc2.weight bert_output.dense.bias = esm_layer.fc2.bias if is_folding_model: model.esm_s_combine.data = esm.esm_s_combine.data model.af2_to_esm.data = esm.af2_to_esm.data transfer_and_check_weights(esm.embedding, model.embedding) transfer_and_check_weights(esm.esm_s_mlp, model.esm_s_mlp) transfer_and_check_weights(esm.trunk, model.trunk) transfer_and_check_weights(esm.distogram_head, model.distogram_head) transfer_and_check_weights(esm.ptm_head, model.ptm_head) transfer_and_check_weights(esm.lm_head, model.lm_head) transfer_and_check_weights(esm.lddt_head, model.lddt_head) elif classification_head: model.classifier.dense.weight = esm.esm.classification_heads['mnli'].dense.weight model.classifier.dense.bias = 
esm.classification_heads['mnli'].dense.bias model.classifier.out_proj.weight = esm.classification_heads['mnli'].out_proj.weight model.classifier.out_proj.bias = esm.classification_heads['mnli'].out_proj.bias else: model.lm_head.dense.weight = esm.lm_head.dense.weight model.lm_head.dense.bias = esm.lm_head.dense.bias model.lm_head.layer_norm.weight = esm.lm_head.layer_norm.weight model.lm_head.layer_norm.bias = esm.lm_head.layer_norm.bias model.lm_head.decoder.weight = esm.lm_head.weight model.lm_head.bias = esm.lm_head.bias transfer_and_check_weights(esm.contact_head, model.esm.contact_head) if is_folding_model: sample_data = SAMPLE_DATA[:2] else: sample_data = SAMPLE_DATA if is_folding_model: hf_tokenizer = get_esmfold_tokenizer() hf_tokens = hf_tokenizer([row[1] for row in sample_data], return_tensors='pt', padding=True, add_special_tokens=False) (esmfold_aas, esmfold_mask, _, _, _) = esmfold_encode_sequences([row[1] for row in sample_data]) success = torch.all(hf_tokens['input_ids'] == esmfold_aas) and torch.all(hf_tokens['attention_mask'] == esmfold_mask) else: batch_converter = alphabet.get_batch_converter() (batch_labels, batch_strs, batch_tokens) = batch_converter(sample_data) with TemporaryDirectory() as tempdir: vocab = '\n'.join(alphabet.all_toks) vocab_file = Path(tempdir) / 'vocab.txt' vocab_file.write_text(vocab) hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file)) hf_tokens = hf_tokenizer([row[1] for row in sample_data], return_tensors='pt', padding=True) success = torch.all(hf_tokens['input_ids'] == batch_tokens) print('Do both models tokenizers output the same tokens?', '🔥' if success else '💩') if not success: raise Exception('Tokenization does not match!') with torch.no_grad(): if is_folding_model: their_output = esm.cuda().infer([row[1] for row in sample_data]) our_output = model.cuda()(input_ids=hf_tokens['input_ids'].cuda(), attention_mask=hf_tokens['attention_mask'].cuda()) else: our_output = model(**hf_tokens, output_hidden_states=True) our_output = our_output['logits'] if classification_head: their_output = esm.model.classification_heads['mnli'](esm.extract_features(batch_tokens)) else: their_output = esm(hf_tokens['input_ids'], repr_layers=list(range(999))) their_output = their_output['logits'] if is_folding_model: max_absolute_diff = torch.max(torch.abs(our_output['positions'] - their_output['positions'])).item() success = torch.allclose(our_output['positions'], their_output['positions'], atol=1e-05) else: max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item() success = torch.allclose(our_output, their_output, atol=1e-05) print(f'max_absolute_diff = {max_absolute_diff}') print('Do both models output the same tensors?', '🔥' if success else '💩') if not success: raise Exception('Something went wRoNg') if not is_folding_model: our_output = model.predict_contacts(hf_tokens['input_ids'], hf_tokens['attention_mask']) their_output = esm.predict_contacts(hf_tokens['input_ids']) max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item() success = torch.allclose(our_output, their_output, atol=1e-05) print('Contact prediction testing:') print(f'max_absolute_diff = {max_absolute_diff}') print('Do both models output the same tensors?', '🔥' if success else '💩') if not success: raise Exception('Something went wRoNg') pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True) print(f'Saving model to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) del esm print(f'Saving tokenizer to 
{pytorch_dump_folder_path}') hf_tokenizer.save_pretrained(pytorch_dump_folder_path) if push_to_repo: model.push_to_hub(repo_id=push_to_repo, token=auth_token) hf_tokenizer.push_to_hub(repo_id=push_to_repo, token=auth_token) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', type=str, required=True, help='Path to the output PyTorch model.') parser.add_argument('--classification_head', action='store_true', help='Whether to convert a final classification head.') parser.add_argument('--model', default=None, type=str, required=True, help='Name of model to convert.') parser.add_argument('--push_to_repo', type=str, help='Repo to upload to (including username!).') parser.add_argument('--auth_token', type=str, help='HuggingFace auth token.') args = parser.parse_args() convert_esm_checkpoint_to_pytorch(args.model, args.pytorch_dump_folder_path, args.classification_head, args.push_to_repo, args.auth_token) # File: transformers-main/src/transformers/models/esm/modeling_esm.py """""" import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import logging from .configuration_esm import EsmConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'facebook/esm2_t6_8M_UR50D' _CONFIG_FOR_DOC = 'EsmConfig' def rotate_half(x): (x1, x2) = x.chunk(2, dim=-1) return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(x, cos, sin): cos = cos[:, :, :x.shape[-2], :] sin = sin[:, :, :x.shape[-2], :] return x * cos + rotate_half(x) * sin def gelu(x): return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def symmetrize(x): return x + x.transpose(-1, -2) def average_product_correct(x): a1 = x.sum(-1, keepdims=True) a2 = x.sum(-2, keepdims=True) a12 = x.sum((-1, -2), keepdims=True) avg = a1 * a2 avg.div_(a12) normalized = x - avg return normalized class RotaryEmbedding(torch.nn.Module): def __init__(self, dim: int): super().__init__() inv_freq = 1.0 / 10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim) self.register_buffer('inv_freq', inv_freq) self._seq_len_cached = None self._cos_cached = None self._sin_cached = None def _update_cos_sin_tables(self, x, seq_dimension=2): seq_len = x.shape[seq_dimension] if seq_len != self._seq_len_cached or self._cos_cached.device != x.device: self._seq_len_cached = seq_len t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self._cos_cached = emb.cos()[None, None, :, :] self._sin_cached = emb.sin()[None, None, :, :] return (self._cos_cached, self._sin_cached) def forward(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: (self._cos_cached, self._sin_cached) = self._update_cos_sin_tables(k, seq_dimension=-2) return (apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached), apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached)) class EsmContactPredictionHead(nn.Module): def 
__init__(self, in_features: int, bias=True, eos_idx: int=2): super().__init__() self.in_features = in_features self.eos_idx = eos_idx self.regression = nn.Linear(in_features, 1, bias) self.activation = nn.Sigmoid() def forward(self, tokens, attentions): eos_mask = tokens.ne(self.eos_idx).to(attentions) eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) attentions = attentions * eos_mask[:, None, None, :, :] attentions = attentions[..., :-1, :-1] attentions = attentions[..., 1:, 1:] (batch_size, layers, heads, seqlen, _) = attentions.size() attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen) attentions = attentions.to(self.regression.weight.device) attentions = average_product_correct(symmetrize(attentions)) attentions = attentions.permute(0, 2, 3, 1) return self.activation(self.regression(attentions).squeeze(3)) class EsmEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) if config.emb_layer_norm_before: self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) else: self.layer_norm = None self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx) self.token_dropout = config.token_dropout self.mask_token_id = config.mask_token_id def forward(self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) embeddings = inputs_embeds if self.token_dropout: embeddings = embeddings.masked_fill((input_ids == self.mask_token_id).unsqueeze(-1), 0.0) mask_ratio_train = 0.15 * 0.8 src_lengths = attention_mask.sum(-1) mask_ratio_observed = (input_ids == self.mask_token_id).sum(-1).float() / src_lengths embeddings = (embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]).to(embeddings.dtype) if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings if self.layer_norm is not None: embeddings = self.layer_norm(embeddings) if attention_mask is not None: embeddings = (embeddings * attention_mask.unsqueeze(-1)).to(embeddings.dtype) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device) return position_ids.unsqueeze(0).expand(input_shape) class EsmSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') 
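# [Illustrative sketch -- not part of the original file.] The contact head above feeds the
# stacked self-attention maps through `symmetrize` and `average_product_correct` (APC)
# before the per-pair regression. A minimal, hedged demo of that preprocessing on a dummy
# attention tensor; the shapes and the helper name `apc_demo` are made up for illustration.
def apc_demo():
    import torch
    attn = torch.rand(1, 6 * 20, 33, 33)  # (batch, layers * heads, seq, seq), made-up sizes
    sym = attn + attn.transpose(-1, -2)  # same formula as `symmetrize` above
    a1 = sym.sum(-1, keepdim=True)
    a2 = sym.sum(-2, keepdim=True)
    a12 = sym.sum((-1, -2), keepdim=True)
    corrected = sym - a1 * a2 / a12  # same correction as `average_product_correct` above
    return corrected.permute(0, 2, 3, 1)  # channels last, as expected by the regression layer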
self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') self.rotary_embeddings = None if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) elif self.position_embedding_type == 'rotary': self.rotary_embeddings = RotaryEmbedding(dim=self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) query_layer = query_layer * self.attention_head_size ** (-0.5) if self.is_decoder: past_key_value = (key_layer, value_layer) if self.position_embedding_type == 'rotary': (query_layer, key_layer) = self.rotary_embeddings(query_layer, key_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', 
query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs.to(value_layer.dtype), value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class EsmSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class EsmAttention(nn.Module): def __init__(self, config): super().__init__() self.self = EsmSelfAttention(config) self.output = EsmSelfOutput(config) self.pruned_heads = set() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False): hidden_states_ln = self.LayerNorm(hidden_states) self_outputs = self.self(hidden_states_ln, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class EsmIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = gelu(hidden_states) return hidden_states class EsmOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = 
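# [Illustrative sketch -- not part of the original file.] When `position_embedding_type ==
# 'rotary'`, the attention above rotates the query/key vectors via `rotate_half` /
# `apply_rotary_pos_emb`. A minimal, hedged demo of that rotation with made-up shapes;
# the helper name `rotary_demo` is not part of the ESM API.
def rotary_demo():
    import torch
    dim, seq = 8, 5
    q = torch.randn(1, 1, seq, dim)  # (batch, heads, seq, head_dim), made-up sizes
    inv_freq = 1.0 / 10000 ** (torch.arange(0, dim, 2).float() / dim)
    freqs = torch.outer(torch.arange(seq).float(), inv_freq)
    emb = torch.cat((freqs, freqs), dim=-1)
    cos, sin = emb.cos()[None, None], emb.sin()[None, None]
    x1, x2 = q.chunk(2, dim=-1)
    rotated = torch.cat((-x2, x1), dim=-1)  # same as `rotate_half`
    return q * cos + rotated * sin  # same as `apply_rotary_pos_emb`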
self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class EsmLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = EsmAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise RuntimeError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = EsmAttention(config) self.intermediate = EsmIntermediate(config) self.output = EsmOutput(config) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False): self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise AttributeError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = self.feed_forward_chunk(attention_output) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): attention_output_ln = self.LayerNorm(attention_output) intermediate_output = self.intermediate(attention_output_ln) layer_output = self.output(intermediate_output, attention_output) return layer_output class EsmEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([EsmLayer(config) for _ in range(config.num_hidden_layers)]) self.emb_layer_norm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.gradient_checkpointing = False def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True): if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. 
Setting `use_cache=False`...') use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = next_decoder_cache + (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if self.emb_layer_norm_after: hidden_states = self.emb_layer_norm_after(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class EsmPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class EsmPreTrainedModel(PreTrainedModel): config_class = EsmConfig base_model_prefix = 'esm' supports_gradient_checkpointing = True _no_split_modules = ['EsmLayer', 'EsmFoldTriangularSelfAttentionBlock', 'EsmEmbeddings'] def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) ESM_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`EsmConfig`]): Model configuration class with all the parameters of the\n model. 
Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' ESM_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare ESM Model transformer outputting raw hidden-states without any specific head on top.', ESM_START_DOCSTRING) class EsmModel(EsmPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = EsmEmbeddings(config) self.encoder = EsmEncoder(config) self.pooler = EsmPooler(config) if add_pooling_layer else None self.contact_head = EsmContactPredictionHead(in_features=config.num_hidden_layers * config.num_attention_heads, bias=True) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format('(batch_size, sequence_length)')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) if self.config.is_decoder and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = 
self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) def predict_contacts(self, tokens, attention_mask): attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions attns = torch.stack(attns, dim=1) attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(3) attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(4) return self.contact_head(tokens, attns) @add_start_docstrings('ESM Model with a `language modeling` head on top.', ESM_START_DOCSTRING) class EsmForMaskedLM(EsmPreTrainedModel): _tied_weights_keys = ['lm_head.decoder.weight'] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning('If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.') self.esm = EsmModel(config, add_pooling_layer=False) self.lm_head = EsmLMHead(config) self.init_weights() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='') def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = 
CrossEntropyLoss() labels = labels.to(prediction_scores.device) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def predict_contacts(self, tokens, attention_mask): return self.esm.predict_contacts(tokens, attention_mask=attention_mask) class EsmLMHead(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) x = self.decoder(x) + self.bias return x @add_start_docstrings('\n ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', ESM_START_DOCSTRING) class EsmForSequenceClassification(EsmPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.esm = EsmModel(config, add_pooling_layer=False) self.classifier = EsmClassificationHead(config) self.init_weights() @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else 
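# [Illustrative sketch -- not part of the original file.] The sequence-classification head
# above infers `problem_type` from the labels when the config leaves it unset, then picks
# the loss accordingly (MSE / cross-entropy / BCE-with-logits). A hedged restatement of
# that rule; the helper name `infer_problem_type_demo` is made up for illustration.
def infer_problem_type_demo(num_labels, labels):
    import torch
    if num_labels == 1:
        return 'regression'  # MSELoss on squeezed logits and labels
    if labels.dtype in (torch.long, torch.int):
        return 'single_label_classification'  # CrossEntropyLoss over num_labels classes
    return 'multi_label_classification'  # BCEWithLogitsLoss on the raw logits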
output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', ESM_START_DOCSTRING) class EsmForTokenClassification(EsmPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.esm = EsmModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(logits.device) loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class EsmClassificationHead(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx # File: transformers-main/src/transformers/models/esm/modeling_esmfold.py import math import sys from dataclasses import dataclass from functools import partial from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union import numpy as np import torch import torch.nn as nn from torch.nn import LayerNorm from ...integrations.deepspeed import is_deepspeed_available from ...modeling_outputs import ModelOutput from ...utils import ContextManagers, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, logging, replace_return_docstrings from .configuration_esm import EsmConfig from .modeling_esm import 
ESM_START_DOCSTRING, EsmModel, EsmPreTrainedModel from .openfold_utils import OFProtein, Rigid, Rotation, atom14_to_atom37, chunk_layer, compute_predicted_aligned_error, compute_tm, frames_and_literature_positions_to_atom14_pos, make_atom14_masks, residue_constants, to_pdb, torsion_angles_to_frames logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'facebook/esmfold_v1' _CONFIG_FOR_DOC = 'EsmConfig' @dataclass class EsmForProteinFoldingOutput(ModelOutput): frames: torch.FloatTensor = None sidechain_frames: torch.FloatTensor = None unnormalized_angles: torch.FloatTensor = None angles: torch.FloatTensor = None positions: torch.FloatTensor = None states: torch.FloatTensor = None s_s: torch.FloatTensor = None s_z: torch.FloatTensor = None distogram_logits: torch.FloatTensor = None lm_logits: torch.FloatTensor = None aatype: torch.FloatTensor = None atom14_atom_exists: torch.FloatTensor = None residx_atom14_to_atom37: torch.FloatTensor = None residx_atom37_to_atom14: torch.FloatTensor = None atom37_atom_exists: torch.FloatTensor = None residue_index: torch.FloatTensor = None lddt_head: torch.FloatTensor = None plddt: torch.FloatTensor = None ptm_logits: torch.FloatTensor = None ptm: torch.FloatTensor = None aligned_confidence_probs: torch.FloatTensor = None predicted_aligned_error: torch.FloatTensor = None max_predicted_aligned_error: torch.FloatTensor = None ESMFOLD_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n masking_pattern (`torch.LongTensor` of shape `({0})`, *optional*):\n Locations of tokens to mask during training as a form of regularization. Mask values selected in `[0, 1]`.\n num_recycles (`int`, *optional*, defaults to `None`):\n Number of times to recycle the input sequence. If `None`, defaults to `config.num_recycles`. "Recycling"\n consists of passing the output of the folding trunk back in as input to the trunk. During training, the\n number of recycles should vary with each batch, to ensure that the model learns to output valid predictions\n after each recycle. During inference, num_recycles should be set to the highest value that the model was\n trained with for maximum accuracy. 
Accordingly, when this value is set to `None`, config.max_recycles is\n used.\n' def is_fp16_enabled(): fp16_enabled = torch.get_autocast_gpu_dtype() == torch.float16 fp16_enabled = fp16_enabled and torch.is_autocast_enabled() return fp16_enabled def is_deepspeed_initialized(): if is_deepspeed_available(): return False else: try: import deepspeed return deepspeed.utils.is_initialized() except Exception: return False def collate_dense_tensors(samples: List[torch.Tensor], pad_v: float=0) -> torch.Tensor: if len(samples) == 0: return torch.Tensor() if len({x.dim() for x in samples}) != 1: raise RuntimeError(f'Samples has varying dimensions: {[x.dim() for x in samples]}') (device,) = tuple({x.device for x in samples}) max_shape = [max(lst) for lst in zip(*[x.shape for x in samples])] result = torch.empty(len(samples), *max_shape, dtype=samples[0].dtype, device=device) result.fill_(pad_v) for i in range(len(samples)): result_i = result[i] t = samples[i] result_i[tuple((slice(0, k) for k in t.shape))] = t return result def flatten_final_dims(t: torch.Tensor, no_dims: int): return t.reshape(t.shape[:-no_dims] + (-1,)) def permute_final_dims(tensor: torch.Tensor, inds: List[int]): zero_index = -1 * len(inds) first_inds = list(range(len(tensor.shape[:zero_index]))) return tensor.permute(first_inds + [zero_index + i for i in inds]) def dict_multimap(fn, dicts): first = dicts[0] new_dict = {} for (k, v) in first.items(): all_v = [d[k] for d in dicts] if isinstance(v, dict): new_dict[k] = dict_multimap(fn, all_v) else: new_dict[k] = fn(all_v) return new_dict def trunc_normal_init_(weights, scale=1.0, fan='fan_in'): shape = weights.shape scale = scale / max(1, shape[1]) if not is_scipy_available(): logger.warning('This init requires scipy, but scipy was not found, default to an approximation that might not be equivalent.') std = math.sqrt(scale) torch.nn.init.normal_(weights, std=std).clamp(min=0.0, max=2.0 * std) else: from scipy.stats import truncnorm std = math.sqrt(scale) / truncnorm.std(a=-2, b=2, loc=0, scale=1) samples = truncnorm.rvs(a=-2, b=2, loc=0, scale=std, size=weights.numel()) samples = np.reshape(samples, shape) weights.copy_(torch.tensor(samples, device=weights.device)) def ipa_point_weights_init_(weights): with torch.no_grad(): softplus_inverse_1 = 0.541324854612918 weights.fill_(softplus_inverse_1) class EsmFoldLinear(nn.Linear): def __init__(self, in_dim: int, out_dim: int, bias: bool=True, init: str='default', init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]]=None): super().__init__(in_dim, out_dim, bias=bias) if bias: with torch.no_grad(): self.bias.fill_(0) self.init = init self.init_fn = init_fn if init not in ['default', 'relu', 'glorot', 'gating', 'normal', 'final']: raise ValueError('Invalid init string.') class EsmFoldLayerNorm(nn.Module): def __init__(self, c_in, eps=1e-05): super().__init__() self.c_in = (c_in,) self.eps = eps self.weight = nn.Parameter(torch.ones(c_in)) self.bias = nn.Parameter(torch.zeros(c_in)) def forward(self, x): d = x.dtype if d is torch.bfloat16 and (not is_deepspeed_initialized()): with torch.cuda.amp.autocast(enabled=False): out = nn.functional.layer_norm(x, self.c_in, self.weight.to(dtype=d), self.bias.to(dtype=d), self.eps) else: out = nn.functional.layer_norm(x, self.c_in, self.weight, self.bias, self.eps) return out @torch.jit.ignore def softmax_no_cast(t: torch.Tensor, dim: int=-1) -> torch.Tensor: d = t.dtype if d is torch.bfloat16 and (not is_deepspeed_initialized()): with torch.cuda.amp.autocast(enabled=False): s = 
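# [Illustrative sketch -- not part of the original file.] `collate_dense_tensors` above
# right-pads a list of same-rank tensors to their elementwise-max shape with `pad_v`.
# A minimal, hedged demo of that padding behaviour with made-up sizes; it restates the
# logic rather than calling the module.
def collate_demo():
    import torch
    a, b = torch.ones(3, 4), torch.ones(5, 2)
    max_shape = [max(sizes) for sizes in zip(a.shape, b.shape)]  # -> [5, 4]
    out = torch.zeros(2, *max_shape)  # pad_v = 0
    out[0, : a.shape[0], : a.shape[1]] = a
    out[1, : b.shape[0], : b.shape[1]] = b
    return out  # shape (2, 5, 4)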
torch.nn.functional.softmax(t, dim=dim) else: s = torch.nn.functional.softmax(t, dim=dim) return s class EsmFoldAttention(nn.Module): def __init__(self, c_q: int, c_k: int, c_v: int, c_hidden: int, no_heads: int, gating: bool=True): super().__init__() self.c_q = c_q self.c_k = c_k self.c_v = c_v self.c_hidden = c_hidden self.no_heads = no_heads self.gating = gating self.linear_q = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, bias=False, init='glorot') self.linear_k = EsmFoldLinear(self.c_k, self.c_hidden * self.no_heads, bias=False, init='glorot') self.linear_v = EsmFoldLinear(self.c_v, self.c_hidden * self.no_heads, bias=False, init='glorot') self.linear_o = EsmFoldLinear(self.c_hidden * self.no_heads, self.c_q, init='final') self.linear_g = None if self.gating: self.linear_g = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, init='gating') self.sigmoid = nn.Sigmoid() def _prep_qkv(self, q_x: torch.Tensor, kv_x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: q = self.linear_q(q_x) k = self.linear_k(kv_x) v = self.linear_v(kv_x) q = q.view(q.shape[:-1] + (self.no_heads, -1)) k = k.view(k.shape[:-1] + (self.no_heads, -1)) v = v.view(v.shape[:-1] + (self.no_heads, -1)) q = q.transpose(-2, -3) k = k.transpose(-2, -3) v = v.transpose(-2, -3) q /= math.sqrt(self.c_hidden) return (q, k, v) def _wrap_up(self, o: torch.Tensor, q_x: torch.Tensor) -> torch.Tensor: if self.linear_g is not None: g = self.sigmoid(self.linear_g(q_x)) g = g.view(g.shape[:-1] + (self.no_heads, -1)) o = o * g o = flatten_final_dims(o, 2) o = self.linear_o(o) return o def forward(self, q_x: torch.Tensor, kv_x: torch.Tensor, biases: Optional[List[torch.Tensor]]=None, use_memory_efficient_kernel: bool=False, use_lma: bool=False, lma_q_chunk_size: int=1024, lma_kv_chunk_size: int=4096, use_flash: bool=False, flash_mask: Optional[torch.Tensor]=None) -> torch.Tensor: if use_lma and (lma_q_chunk_size is None or lma_kv_chunk_size is None): raise ValueError('If use_lma is specified, lma_q_chunk_size and lma_kv_chunk_size must be provided') if use_flash and biases is not None: raise ValueError('use_flash is incompatible with the bias option. 
For masking, use flash_mask instead') attn_options = [use_memory_efficient_kernel, use_lma, use_flash] if sum(attn_options) > 1: raise ValueError('Choose at most one alternative attention algorithm') if biases is None: biases = [] (query, key, value) = self._prep_qkv(q_x, kv_x) key = permute_final_dims(key, (1, 0)) output = torch.matmul(query, key) for b in biases: output += b output = softmax_no_cast(output, -1) output = torch.matmul(output, value) output = output.transpose(-2, -3) output = self._wrap_up(output, q_x) return output class EsmFoldTriangleAttention(nn.Module): def __init__(self, c_in, c_hidden, no_heads, starting=True, inf=1000000000.0): super().__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_heads = no_heads self.starting = starting self.inf = inf self.layer_norm = LayerNorm(self.c_in) self.linear = EsmFoldLinear(c_in, self.no_heads, bias=False, init='normal') self.mha = EsmFoldAttention(self.c_in, self.c_in, self.c_in, self.c_hidden, self.no_heads) @torch.jit.ignore def _chunk(self, x: torch.Tensor, biases: List[torch.Tensor], chunk_size: int, use_memory_efficient_kernel: bool=False, use_lma: bool=False, inplace_safe: bool=False) -> torch.Tensor: mha_inputs = {'q_x': x, 'kv_x': x, 'biases': biases} return chunk_layer(partial(self.mha, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma), mha_inputs, chunk_size=chunk_size, no_batch_dims=len(x.shape[:-2]), _out=x if inplace_safe else None) def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor]=None, chunk_size: Optional[int]=None, use_memory_efficient_kernel: bool=False, use_lma: bool=False, inplace_safe: bool=False) -> torch.Tensor: if mask is None: mask = x.new_ones(x.shape[:-1]) if not self.starting: x = x.transpose(-2, -3) mask = mask.transpose(-1, -2) x = self.layer_norm(x) mask_bias = (self.inf * (mask - 1))[..., :, None, None, :] triangle_bias = permute_final_dims(self.linear(x), (2, 0, 1)) triangle_bias = triangle_bias.unsqueeze(-4) biases = [mask_bias, triangle_bias] if chunk_size is not None: x = self._chunk(x, biases, chunk_size, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma, inplace_safe=inplace_safe) else: x = self.mha(q_x=x, kv_x=x, biases=biases, use_memory_efficient_kernel=use_memory_efficient_kernel, use_lma=use_lma) if not self.starting: x = x.transpose(-2, -3) return x class EsmFoldTriangleMultiplicativeUpdate(nn.Module): def __init__(self, config, _outgoing=True): super().__init__() c_hidden = config.pairwise_state_dim self._outgoing = _outgoing self.linear_a_p = EsmFoldLinear(c_hidden, c_hidden) self.linear_a_g = EsmFoldLinear(c_hidden, c_hidden, init='gating') self.linear_b_p = EsmFoldLinear(c_hidden, c_hidden) self.linear_b_g = EsmFoldLinear(c_hidden, c_hidden, init='gating') self.linear_g = EsmFoldLinear(c_hidden, c_hidden, init='gating') self.linear_z = EsmFoldLinear(c_hidden, c_hidden, init='final') self.layer_norm_in = LayerNorm(c_hidden) self.layer_norm_out = LayerNorm(c_hidden) self.sigmoid = nn.Sigmoid() def _combine_projections(self, a: torch.Tensor, b: torch.Tensor, _inplace_chunk_size: Optional[int]=None) -> torch.Tensor: if self._outgoing: a = permute_final_dims(a, (2, 0, 1)) b = permute_final_dims(b, (2, 1, 0)) else: a = permute_final_dims(a, (2, 1, 0)) b = permute_final_dims(b, (2, 0, 1)) if _inplace_chunk_size is not None: for i in range(0, a.shape[-3], _inplace_chunk_size): a_chunk = a[..., i:i + _inplace_chunk_size, :, :] b_chunk = b[..., i:i + _inplace_chunk_size, :, :] a[..., i:i + _inplace_chunk_size, :, :] = 
torch.matmul(a_chunk, b_chunk) p = a else: p = torch.matmul(a, b) return permute_final_dims(p, (1, 2, 0)) def _inference_forward(self, z: torch.Tensor, mask: Optional[torch.Tensor]=None, inplace_chunk_size: Optional[int]=None, with_add: bool=True): if mask is None: mask = z.new_ones(z.shape[:-1]) mask = mask.unsqueeze(-1) def compute_projection_helper(pair, mask, a=True): if a: linear_g = self.linear_a_g linear_p = self.linear_a_p else: linear_g = self.linear_b_g linear_p = self.linear_b_p pair = self.layer_norm_in(pair) p = linear_g(pair) p.sigmoid_() p *= linear_p(pair) p *= mask p = permute_final_dims(p, (2, 0, 1)) return p def compute_projection(pair, mask, a=True, chunked=True): need_transpose = self._outgoing ^ a if not chunked: p = compute_projection_helper(pair, mask, a) if need_transpose: p = p.transpose(-1, -2) else: linear_g = self.linear_a_g if a else self.linear_b_g c = linear_g.bias.shape[-1] out_shape = pair.shape[:-3] + (c,) + pair.shape[-3:-1] p = pair.new_zeros(out_shape) for i in range(0, pair.shape[-3], inplace_chunk_size): pair_chunk = pair[..., i:i + inplace_chunk_size, :, :] pair_chunk = compute_projection_helper(pair[..., i:i + inplace_chunk_size, :, :], mask[..., i:i + inplace_chunk_size, :, :], a) if need_transpose: pair_chunk = pair_chunk.transpose(-1, -2) p[..., i:i + inplace_chunk_size] = pair_chunk else: p[..., i:i + inplace_chunk_size, :] = pair_chunk del pair_chunk return p a = compute_projection(z, mask, True, chunked=True) if inplace_chunk_size is not None: n = a.shape[-1] half_n = n // 2 + n % 2 row_dim = -3 col_dim = -2 b_chunk_dim = row_dim if self._outgoing else col_dim def empty_slicer(t): return [slice(None) for _ in t.shape] def slice_tensor(t, start, end, dim): s = empty_slicer(t) s[dim] = slice(start, end) return t[s] def flip_z_cache_(z_cache, z): quadrant_3 = slice_tensor(z_cache, half_n, None, row_dim) z_cache = z_cache.transpose(row_dim, col_dim) z_cache = z_cache[..., :n // 2, :, :] first_half_slicer = empty_slicer(z_cache) first_half_slicer[col_dim] = slice(0, half_n) z_cache[first_half_slicer] = quadrant_3 quadrant_4 = slice_tensor(z, half_n, None, row_dim) quadrant_4 = slice_tensor(quadrant_4, half_n, None, col_dim) quadrant_3_slicer = empty_slicer(z_cache) quadrant_3_slicer[col_dim] = slice(half_n, None) z_cache[quadrant_3_slicer] = quadrant_4 return z_cache z_cache_shape = list(z.shape) z_cache_shape[col_dim] = half_n z_cache = z.new_zeros(z_cache_shape) z_cache_slicer = empty_slicer(z_cache) z_cache_slicer[col_dim] = slice(0, half_n) z_cache.copy_(z[z_cache_slicer]) z_cache_rotated = False i_range = list(range(0, half_n, inplace_chunk_size)) initial_offsets = [i_2 - i_1 for (i_1, i_2) in zip(i_range, i_range[1:] + [half_n])] after_half = list(range(half_n, n, inplace_chunk_size)) after_half_offsets = [inplace_chunk_size for _ in after_half] combined_range_with_offsets = zip(i_range + after_half, initial_offsets + after_half_offsets) for (i, offset) in combined_range_with_offsets: if not z_cache_rotated and i >= half_n: z_cache = flip_z_cache_(z_cache, z) z_cache_rotated = True z_chunk_b = slice_tensor(z, i, i + offset, b_chunk_dim) mask_chunk = slice_tensor(mask, i, i + offset, b_chunk_dim) z_chunk_b = z_chunk_b.clone() if b_chunk_dim == col_dim: z_chunk_b = slice_tensor(z, i, i + offset, col_dim) elif not z_cache_rotated: z_chunk_slicer = empty_slicer(z_chunk_b) z_chunk_slicer[col_dim] = slice(0, half_n) z_chunk_b[z_chunk_slicer] = slice_tensor(z_cache, i, i + offset, row_dim) else: z_cache_offset = i - half_n z_chunk_b = 
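# [Illustrative sketch -- not part of the original file.] The non-chunked path of the
# triangular multiplicative update above computes, for "outgoing" edges, a per-channel
# product p[i, j] = sum_k a[i, k] * b[j, k] from the two gated projections of the pair
# representation. A hedged demo with random stand-ins for the gated projections and
# made-up sizes; `triangle_update_demo` is not part of the ESMFold API.
def triangle_update_demo():
    import torch
    z = torch.randn(1, 8, 8, 16)  # (batch, i, j, pairwise_state_dim), made-up sizes
    a = torch.sigmoid(torch.randn_like(z)) * z  # stands in for linear_a_g / linear_a_p
    b = torch.sigmoid(torch.randn_like(z)) * z  # stands in for linear_b_g / linear_b_p
    return torch.einsum('bikc,bjkc->bijc', a, b)  # outgoing triangle product per channel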
slice_tensor(z_cache, z_cache_offset, z_cache_offset + offset, row_dim) b_chunk = compute_projection(z_chunk_b, mask_chunk, a=False, chunked=False) del z_chunk_b x_chunk = torch.matmul(a, b_chunk) x_chunk = permute_final_dims(x_chunk, (1, 2, 0)) x_chunk = self.layer_norm_out(x_chunk) x_chunk = self.linear_z(x_chunk) z_chunk_g = slice_tensor(z, i, i + offset, col_dim) g_chunk = self.linear_g(self.layer_norm_in(z_chunk_g)) g_chunk.sigmoid_() del z_chunk_g x_chunk *= g_chunk z_slicer = empty_slicer(z) z_slicer[col_dim] = slice(i, i + offset) if with_add: z[z_slicer] += x_chunk else: z[z_slicer] = x_chunk else: b = compute_projection(z, mask, False, False) x = torch.matmul(a, b) x = self.layer_norm_out(x) x = self.linear_z(x) g = self.linear_g(z) g.sigmoid_() x *= g if with_add: z += x else: z = x return z def forward(self, z: torch.Tensor, mask: Optional[torch.Tensor]=None, inplace_safe: bool=False, _add_with_inplace: bool=False, _inplace_chunk_size: Optional[int]=256) -> torch.Tensor: if inplace_safe: x = self._inference_forward(z, mask, inplace_chunk_size=_inplace_chunk_size, with_add=_add_with_inplace) return x if mask is None: mask = z.new_ones(z.shape[:-1]) mask = mask.unsqueeze(-1) z = self.layer_norm_in(z) a = mask a = a * self.sigmoid(self.linear_a_g(z)) a = a * self.linear_a_p(z) b = mask b = b * self.sigmoid(self.linear_b_g(z)) b = b * self.linear_b_p(z) if is_fp16_enabled(): with torch.cuda.amp.autocast(enabled=False): x = self._combine_projections(a.float(), b.float()) else: x = self._combine_projections(a, b) del a, b x = self.layer_norm_out(x) x = self.linear_z(x) g = self.sigmoid(self.linear_g(z)) x = x * g return x class EsmFoldPreTrainedModel(EsmPreTrainedModel): def _init_weights(self, module): if isinstance(module, EsmFoldLinear): with torch.no_grad(): if module.init_fn is not None: module.init_fn(module.weight, module.bias) elif module.init == 'default': trunc_normal_init_(module.weight, scale=1.0) elif module.init == 'relu': trunc_normal_init_(module.weight, scale=2.0) elif module.init == 'glorot': nn.init.xavier_uniform_(module.weight, gain=1) elif module.init == 'gating': module.weight.fill_(0.0) if module.bias: module.bias.fill_(1.0) elif module.init == 'normal': torch.nn.init.kaiming_normal_(module.weight, nonlinearity='linear') elif module.init == 'final': module.weight.fill_(0.0) elif isinstance(module, EsmFoldInvariantPointAttention): ipa_point_weights_init_(module.head_weights) elif isinstance(module, EsmFoldTriangularSelfAttentionBlock): torch.nn.init.zeros_(module.tri_mul_in.linear_z.weight) torch.nn.init.zeros_(module.tri_mul_in.linear_z.bias) torch.nn.init.zeros_(module.tri_mul_out.linear_z.weight) torch.nn.init.zeros_(module.tri_mul_out.linear_z.bias) torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.weight) torch.nn.init.zeros_(module.tri_att_start.mha.linear_o.bias) torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.weight) torch.nn.init.zeros_(module.tri_att_end.mha.linear_o.bias) torch.nn.init.zeros_(module.sequence_to_pair.o_proj.weight) torch.nn.init.zeros_(module.sequence_to_pair.o_proj.bias) torch.nn.init.zeros_(module.pair_to_sequence.linear.weight) torch.nn.init.zeros_(module.seq_attention.o_proj.weight) torch.nn.init.zeros_(module.seq_attention.o_proj.bias) torch.nn.init.zeros_(module.mlp_seq.mlp[-2].weight) torch.nn.init.zeros_(module.mlp_seq.mlp[-2].bias) torch.nn.init.zeros_(module.mlp_pair.mlp[-2].weight) torch.nn.init.zeros_(module.mlp_pair.mlp[-2].bias) else: super()._init_weights(module) class EsmFoldSelfAttention(nn.Module): def 
__init__(self, embed_dim, num_heads, head_width, gated=False): super().__init__() assert embed_dim == num_heads * head_width self.embed_dim = embed_dim self.num_heads = num_heads self.head_width = head_width self.proj = nn.Linear(embed_dim, embed_dim * 3, bias=False) self.o_proj = nn.Linear(embed_dim, embed_dim, bias=True) self.gated = gated if gated: self.g_proj = nn.Linear(embed_dim, embed_dim) torch.nn.init.zeros_(self.g_proj.weight) torch.nn.init.ones_(self.g_proj.bias) self.rescale_factor = self.head_width ** (-0.5) torch.nn.init.zeros_(self.o_proj.bias) def forward(self, x, mask=None, bias=None, indices=None): t = self.proj(x).view(*x.shape[:2], self.num_heads, -1) t = t.permute(0, 2, 1, 3) (q, k, v) = t.chunk(3, dim=-1) q = self.rescale_factor * q a = torch.einsum('...qc,...kc->...qk', q, k) if bias is not None: a = a + bias.permute(0, 3, 1, 2) if mask is not None: mask = mask[:, None, None] a = a.masked_fill(mask == False, -np.inf) a = nn.functional.softmax(a, dim=-1) y = torch.einsum('...hqk,...hkc->...qhc', a, v) y = y.reshape(*y.shape[:2], -1) if self.gated: y = self.g_proj(x).sigmoid() * y y = self.o_proj(y) return (y, a.permute(0, 3, 1, 2)) class EsmFoldDropout(nn.Module): def __init__(self, r: float, batch_dim: Union[int, List[int]]): super().__init__() self.r = r if isinstance(batch_dim, int): batch_dim = [batch_dim] self.batch_dim = batch_dim self.dropout = nn.Dropout(self.r) def forward(self, x: torch.Tensor) -> torch.Tensor: shape = list(x.shape) if self.batch_dim is not None: for bd in self.batch_dim: shape[bd] = 1 return x * self.dropout(x.new_ones(shape)) class EsmFoldSequenceToPair(nn.Module): def __init__(self, sequence_state_dim, inner_dim, pairwise_state_dim): super().__init__() self.layernorm = nn.LayerNorm(sequence_state_dim) self.proj = nn.Linear(sequence_state_dim, inner_dim * 2, bias=True) self.o_proj = nn.Linear(2 * inner_dim, pairwise_state_dim, bias=True) torch.nn.init.zeros_(self.proj.bias) torch.nn.init.zeros_(self.o_proj.bias) def forward(self, sequence_state): assert len(sequence_state.shape) == 3 s = self.layernorm(sequence_state) s = self.proj(s) (q, k) = s.chunk(2, dim=-1) prod = q[:, None, :, :] * k[:, :, None, :] diff = q[:, None, :, :] - k[:, :, None, :] x = torch.cat([prod, diff], dim=-1) x = self.o_proj(x) return x class EsmFoldPairToSequence(nn.Module): def __init__(self, pairwise_state_dim, num_heads): super().__init__() self.layernorm = nn.LayerNorm(pairwise_state_dim) self.linear = nn.Linear(pairwise_state_dim, num_heads, bias=False) def forward(self, pairwise_state): assert len(pairwise_state.shape) == 4 z = self.layernorm(pairwise_state) pairwise_bias = self.linear(z) return pairwise_bias class EsmFoldResidueMLP(nn.Module): def __init__(self, embed_dim, inner_dim, dropout=0): super().__init__() self.mlp = nn.Sequential(nn.LayerNorm(embed_dim), nn.Linear(embed_dim, inner_dim), nn.ReLU(), nn.Linear(inner_dim, embed_dim), nn.Dropout(dropout)) def forward(self, x): return x + self.mlp(x) class EsmFoldTriangularSelfAttentionBlock(nn.Module): def __init__(self, config): super().__init__() self.config = config sequence_state_dim = config.sequence_state_dim pairwise_state_dim = config.pairwise_state_dim sequence_num_heads = sequence_state_dim // config.sequence_head_width pairwise_num_heads = pairwise_state_dim // config.pairwise_head_width self.layernorm_1 = nn.LayerNorm(sequence_state_dim) self.sequence_to_pair = EsmFoldSequenceToPair(sequence_state_dim, pairwise_state_dim // 2, pairwise_state_dim) self.pair_to_sequence = 
EsmFoldPairToSequence(pairwise_state_dim, sequence_num_heads) self.seq_attention = EsmFoldSelfAttention(sequence_state_dim, sequence_num_heads, config.sequence_head_width, gated=True) self.tri_mul_out = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=True) self.tri_mul_in = EsmFoldTriangleMultiplicativeUpdate(config, _outgoing=False) self.tri_att_start = EsmFoldTriangleAttention(pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1000000000.0, starting=True) self.tri_att_end = EsmFoldTriangleAttention(pairwise_state_dim, config.pairwise_head_width, pairwise_num_heads, inf=1000000000.0, starting=False) self.mlp_seq = EsmFoldResidueMLP(sequence_state_dim, 4 * sequence_state_dim, dropout=config.dropout) self.mlp_pair = EsmFoldResidueMLP(pairwise_state_dim, 4 * pairwise_state_dim, dropout=config.dropout) self.drop = nn.Dropout(config.dropout) self.row_drop = EsmFoldDropout(config.dropout * 2, 2) self.col_drop = EsmFoldDropout(config.dropout * 2, 1) def forward(self, sequence_state, pairwise_state, mask=None, chunk_size=None, **__kwargs): if len(sequence_state.shape) != 3: raise ValueError(f'`sequence_state` should be a 3d-tensor, got {len(sequence_state.shape)} dims.') if len(pairwise_state.shape) != 4: raise ValueError(f'`pairwise_state` should be a 4d-tensor, got {len(pairwise_state.shape)} dims.') if mask is not None and len(mask.shape) != 2: raise ValueError(f'`mask` should be a 2d-tensor, got {len(mask.shape)} dims.') (batch_dim, seq_dim, sequence_state_dim) = sequence_state.shape pairwise_state_dim = pairwise_state.shape[3] if sequence_state_dim != self.config.sequence_state_dim: raise ValueError(f'`sequence_state` last dimension should be equal to `self.sequence_state_dim`. Got {sequence_state_dim} != {self.config.sequence_state_dim}.') if pairwise_state_dim != self.config.pairwise_state_dim: raise ValueError(f'`pairwise_state` last dimension should be equal to `self.pairwise_state_dim`. 
Got {pairwise_state_dim} != {self.config.pairwise_state_dim}.') if batch_dim != pairwise_state.shape[0]: raise ValueError(f'`sequence_state` and `pairwise_state` have inconsistent batch size: {batch_dim} != {pairwise_state.shape[0]}.') if seq_dim != pairwise_state.shape[1] or seq_dim != pairwise_state.shape[2]: raise ValueError(f'`sequence_state` and `pairwise_state` have inconsistent sequence length: {seq_dim} != {pairwise_state.shape[1]} or {pairwise_state.shape[2]}.') bias = self.pair_to_sequence(pairwise_state) y = self.layernorm_1(sequence_state) (y, _) = self.seq_attention(y, mask=mask, bias=bias) sequence_state = sequence_state + self.drop(y) sequence_state = self.mlp_seq(sequence_state) pairwise_state = pairwise_state + self.sequence_to_pair(sequence_state) tri_mask = mask.unsqueeze(2) * mask.unsqueeze(1) if mask is not None else None pairwise_state = pairwise_state + self.row_drop(self.tri_mul_out(pairwise_state, mask=tri_mask)) pairwise_state = pairwise_state + self.col_drop(self.tri_mul_in(pairwise_state, mask=tri_mask)) pairwise_state = pairwise_state + self.row_drop(self.tri_att_start(pairwise_state, mask=tri_mask, chunk_size=chunk_size)) pairwise_state = pairwise_state + self.col_drop(self.tri_att_end(pairwise_state, mask=tri_mask, chunk_size=chunk_size)) pairwise_state = self.mlp_pair(pairwise_state) return (sequence_state, pairwise_state) class EsmCategoricalMixture: def __init__(self, param, bins=50, start=0, end=1): self.logits = param bins = torch.linspace(start, end, bins + 1, device=self.logits.device, dtype=self.logits.dtype) self.v_bins = (bins[:-1] + bins[1:]) / 2 def log_prob(self, true): true_index = (true.unsqueeze(-1) - self.v_bins[[None] * true.ndim]).abs().argmin(-1) nll = self.logits.log_softmax(-1) return torch.take_along_dim(nll, true_index.unsqueeze(-1), dim=-1).squeeze(-1) def mean(self): return (self.logits.softmax(-1) @ self.v_bins.unsqueeze(1)).squeeze(-1) def categorical_lddt(logits, bins=50): return EsmCategoricalMixture(logits, bins=bins).mean() def get_axial_mask(mask): if mask is None: return None if len(mask.shape) != 2: raise ValueError(f'`mask` should be a 2d-tensor, got {len(mask.shape)} dims.') (batch_dim, seq_dim) = mask.shape m = mask.unsqueeze(1).expand(batch_dim, seq_dim, seq_dim) m = m.reshape(batch_dim * seq_dim, seq_dim) return m class EsmFoldRelativePosition(nn.Module): def __init__(self, config): super().__init__() self.bins = config.position_bins self.embedding = torch.nn.Embedding(2 * self.bins + 2, config.pairwise_state_dim) def forward(self, residue_index, mask=None): if residue_index.dtype != torch.long: raise ValueError(f'`residue_index` has dtype {residue_index.dtype}, it should be `torch.long`.') if mask is not None and residue_index.shape != mask.shape: raise ValueError(f'`residue_index` and `mask` have inconsistent shapes: {residue_index.shape} != {mask.shape}.') diff = residue_index[:, None, :] - residue_index[:, :, None] diff = diff.clamp(-self.bins, self.bins) diff = diff + self.bins + 1 if mask is not None: mask = mask[:, None, :] * mask[:, :, None] diff[mask == False] = 0 output = self.embedding(diff) return output class EsmFoldAngleResnetBlock(nn.Module): def __init__(self, config): super().__init__() self.linear_1 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init='relu') self.linear_2 = EsmFoldLinear(config.resnet_dim, config.resnet_dim, init='final') self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = 
self.linear_2(a) return a + s_initial class EsmFoldAngleResnet(nn.Module): def __init__(self, config): super().__init__() self.config = config self.linear_in = EsmFoldLinear(config.sequence_dim, config.resnet_dim) self.linear_initial = EsmFoldLinear(config.sequence_dim, config.resnet_dim) self.layers = nn.ModuleList() for _ in range(config.num_resnet_blocks): layer = EsmFoldAngleResnetBlock(config) self.layers.append(layer) self.linear_out = EsmFoldLinear(config.resnet_dim, config.num_angles * 2) self.relu = nn.ReLU() def forward(self, s: torch.Tensor, s_initial: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) s = self.linear_out(s) s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt(torch.clamp(torch.sum(s ** 2, dim=-1, keepdim=True), min=self.config.epsilon)) s = s / norm_denom return (unnormalized_s, s) class EsmFoldInvariantPointAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config c_s = config.sequence_dim c_z = config.pairwise_dim self.hidden_dim = config.ipa_dim self.num_heads = config.num_heads_ipa self.num_qk_points = config.num_qk_points self.num_v_points = config.num_v_points hc = config.ipa_dim * config.num_heads_ipa self.linear_q = EsmFoldLinear(c_s, hc) self.linear_kv = EsmFoldLinear(c_s, 2 * hc) hpq = config.num_heads_ipa * config.num_qk_points * 3 self.linear_q_points = EsmFoldLinear(c_s, hpq) hpkv = config.num_heads_ipa * (config.num_qk_points + config.num_v_points) * 3 self.linear_kv_points = EsmFoldLinear(c_s, hpkv) self.linear_b = EsmFoldLinear(c_z, config.num_heads_ipa) self.head_weights = nn.Parameter(torch.zeros(config.num_heads_ipa)) concat_out_dim = config.num_heads_ipa * (c_z + config.ipa_dim + config.num_v_points * 4) self.linear_out = EsmFoldLinear(concat_out_dim, c_s, init='final') self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward(self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool=False, _z_reference_list: Optional[Sequence[torch.Tensor]]=None) -> torch.Tensor: z = [z] q = self.linear_q(s) kv = self.linear_kv(s) q = q.view(q.shape[:-1] + (self.num_heads, -1)) kv = kv.view(kv.shape[:-1] + (self.num_heads, -1)) (k, v) = torch.split(kv, self.hidden_dim, dim=-1) q_pts = self.linear_q_points(s) q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) q_pts = q_pts.view(q_pts.shape[:-2] + (self.num_heads, self.num_qk_points, 3)) kv_pts = self.linear_kv_points(s) kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.num_heads, -1, 3)) (k_pts, v_pts) = torch.split(kv_pts, [self.num_qk_points, self.num_v_points], dim=-2) b = self.linear_b(z[0]) if _offload_inference: assert sys.getrefcount(z[0]) == 2 z[0] = z[0].cpu() if is_fp16_enabled(): with torch.cuda.amp.autocast(enabled=False): a = torch.matmul(permute_final_dims(q.float(), (1, 0, 2)), permute_final_dims(k.float(), (1, 2, 0))) else: a = torch.matmul(permute_final_dims(q, (1, 0, 2)), permute_final_dims(k, (1, 2, 0))) a *= math.sqrt(1.0 / (3 * self.hidden_dim)) a += math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1)) pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5) pt_att = pt_att ** 2 pt_att = 
sum(torch.unbind(pt_att, dim=-1)) head_weights = self.softplus(self.head_weights).view(*(1,) * len(pt_att.shape[:-2]) + (-1, 1)) head_weights = head_weights * math.sqrt(1.0 / (3 * (self.num_qk_points * 9.0 / 2))) pt_att = pt_att * head_weights pt_att = torch.sum(pt_att, dim=-1) * -0.5 square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2) square_mask = self.config.inf * (square_mask - 1) pt_att = permute_final_dims(pt_att, (2, 0, 1)) a = a + pt_att a = a + square_mask.unsqueeze(-3) a = self.softmax(a) o = torch.matmul(a, v.transpose(-2, -3).to(dtype=a.dtype)).transpose(-2, -3) o = flatten_final_dims(o, 2) o_pt = torch.sum(a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :], dim=-2) o_pt = permute_final_dims(o_pt, (2, 0, 3, 1)) o_pt = r[..., None, None].invert_apply(o_pt) o_pt_norm = flatten_final_dims(torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.config.epsilon), 2) o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3) if _offload_inference: z[0] = z[0].to(o_pt.device) o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype)) o_pair = flatten_final_dims(o_pair, 2) s = self.linear_out(torch.cat((o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1).to(dtype=z[0].dtype)) return s class EsmFoldBackboneUpdate(nn.Module): def __init__(self, config): super().__init__() self.linear = EsmFoldLinear(config.sequence_dim, 6, init='final') def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: update = self.linear(s) return update class EsmFoldStructureModuleTransitionLayer(nn.Module): def __init__(self, config): super().__init__() self.linear_1 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init='relu') self.linear_2 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init='relu') self.linear_3 = EsmFoldLinear(config.sequence_dim, config.sequence_dim, init='final') self.relu = nn.ReLU() def forward(self, s): s_initial = s s = self.linear_1(s) s = self.relu(s) s = self.linear_2(s) s = self.relu(s) s = self.linear_3(s) s = s + s_initial return s class EsmFoldStructureModuleTransition(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layers = nn.ModuleList() for _ in range(config.num_transition_layers): l = EsmFoldStructureModuleTransitionLayer(config) self.layers.append(l) self.dropout = nn.Dropout(config.dropout_rate) self.layer_norm = LayerNorm(config.sequence_dim) def forward(self, s): for l in self.layers: s = l(s) s = self.dropout(s) s = self.layer_norm(s) return s class EsmFoldStructureModule(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer_norm_s = LayerNorm(config.sequence_dim) self.layer_norm_z = LayerNorm(config.pairwise_dim) self.linear_in = EsmFoldLinear(config.sequence_dim, config.sequence_dim) self.ipa = EsmFoldInvariantPointAttention(config) self.ipa_dropout = nn.Dropout(config.dropout_rate) self.layer_norm_ipa = LayerNorm(config.sequence_dim) self.transition = EsmFoldStructureModuleTransition(config) self.bb_update = EsmFoldBackboneUpdate(config) self.angle_resnet = EsmFoldAngleResnet(config) def forward(self, evoformer_output_dict, aatype, mask=None, _offload_inference=False): s = evoformer_output_dict['single'] if mask is None: mask = s.new_ones(s.shape[:-1]) s = self.layer_norm_s(s) z = self.layer_norm_z(evoformer_output_dict['pair']) z_reference_list = None if _offload_inference: assert sys.getrefcount(evoformer_output_dict['pair']) == 2 evoformer_output_dict['pair'] = evoformer_output_dict['pair'].cpu() z_reference_list = [z] z = None 
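        # Descriptive note: when `_offload_inference` is enabled, the original pair
        # representation in `evoformer_output_dict['pair']` has just been moved to CPU to
        # free accelerator memory; the layer-normed copy survives only through
        # `z_reference_list`, which is passed to the invariant point attention blocks as
        # `_z_reference_list`, while `z` itself is dropped. The offloaded tensor is moved
        # back to the model device once the structure-module iterations below finish.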
s_initial = s s = self.linear_in(s) rigids = Rigid.identity(s.shape[:-1], s.dtype, s.device, self.training, fmt='quat') outputs = [] for i in range(self.config.num_blocks): s = s + self.ipa(s, z, rigids, mask, _offload_inference=_offload_inference, _z_reference_list=z_reference_list) s = self.ipa_dropout(s) s = self.layer_norm_ipa(s) s = self.transition(s) rigids = rigids.compose_q_update_vec(self.bb_update(s)) backb_to_global = Rigid(Rotation(rot_mats=rigids.get_rots().get_rot_mats(), quats=None), rigids.get_trans()) backb_to_global = backb_to_global.scale_translation(self.config.trans_scale_factor) (unnormalized_angles, angles) = self.angle_resnet(s, s_initial) all_frames_to_global = self.torsion_angles_to_frames(backb_to_global, angles, aatype) pred_xyz = self.frames_and_literature_positions_to_atom14_pos(all_frames_to_global, aatype) scaled_rigids = rigids.scale_translation(self.config.trans_scale_factor) preds = {'frames': scaled_rigids.to_tensor_7(), 'sidechain_frames': all_frames_to_global.to_tensor_4x4(), 'unnormalized_angles': unnormalized_angles, 'angles': angles, 'positions': pred_xyz, 'states': s} outputs.append(preds) rigids = rigids.stop_rot_gradient() del z, z_reference_list if _offload_inference: evoformer_output_dict['pair'] = evoformer_output_dict['pair'].to(s.device) outputs = dict_multimap(torch.stack, outputs) outputs['single'] = s return outputs def _init_residue_constants(self, float_dtype, device): if not hasattr(self, 'default_frames'): self.register_buffer('default_frames', torch.tensor(residue_constants.restype_rigid_group_default_frame, dtype=float_dtype, device=device, requires_grad=False), persistent=False) if not hasattr(self, 'group_idx'): self.register_buffer('group_idx', torch.tensor(residue_constants.restype_atom14_to_rigid_group, device=device, requires_grad=False), persistent=False) if not hasattr(self, 'atom_mask'): self.register_buffer('atom_mask', torch.tensor(residue_constants.restype_atom14_mask, dtype=float_dtype, device=device, requires_grad=False), persistent=False) if not hasattr(self, 'lit_positions'): self.register_buffer('lit_positions', torch.tensor(residue_constants.restype_atom14_rigid_group_positions, dtype=float_dtype, device=device, requires_grad=False), persistent=False) def torsion_angles_to_frames(self, r, alpha, f): self._init_residue_constants(alpha.dtype, alpha.device) return torsion_angles_to_frames(r, alpha, f, self.default_frames) def frames_and_literature_positions_to_atom14_pos(self, r, f): self._init_residue_constants(r.get_rots().dtype, r.get_rots().device) return frames_and_literature_positions_to_atom14_pos(r, f, self.default_frames, self.group_idx, self.atom_mask, self.lit_positions) class EsmFoldingTrunk(nn.Module): def __init__(self, config): super().__init__() self.config = config c_s = config.sequence_state_dim c_z = config.pairwise_state_dim self.pairwise_positional_embedding = EsmFoldRelativePosition(config) self.blocks = nn.ModuleList([EsmFoldTriangularSelfAttentionBlock(config) for _ in range(config.num_blocks)]) self.recycle_bins = 15 self.recycle_s_norm = nn.LayerNorm(c_s) self.recycle_z_norm = nn.LayerNorm(c_z) self.recycle_disto = nn.Embedding(self.recycle_bins, c_z) self.recycle_disto.weight[0].detach().zero_() self.structure_module = EsmFoldStructureModule(config.structure_module) self.trunk2sm_s = nn.Linear(c_s, config.structure_module.sequence_dim) self.trunk2sm_z = nn.Linear(c_z, config.structure_module.pairwise_dim) self.chunk_size = config.chunk_size def set_chunk_size(self, chunk_size): 
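        # Descriptive note: `chunk_size` is forwarded to the triangle-attention calls of each
        # trunk block in `forward()` below. Chunking trades speed for memory: smaller chunks
        # reduce peak memory during inference, and None disables chunking entirely.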
self.chunk_size = chunk_size def forward(self, seq_feats, pair_feats, true_aa, residx, mask, no_recycles): device = seq_feats.device s_s_0 = seq_feats s_z_0 = pair_feats if no_recycles is None: no_recycles = self.config.max_recycles else: if no_recycles < 0: raise ValueError('Number of recycles must not be negative.') no_recycles += 1 def trunk_iter(s, z, residx, mask): z = z + self.pairwise_positional_embedding(residx, mask=mask) for block in self.blocks: (s, z) = block(s, z, mask=mask, residue_index=residx, chunk_size=self.chunk_size) return (s, z) s_s = s_s_0 s_z = s_z_0 recycle_s = torch.zeros_like(s_s) recycle_z = torch.zeros_like(s_z) recycle_bins = torch.zeros(*s_z.shape[:-1], device=device, dtype=torch.int64) for recycle_idx in range(no_recycles): with ContextManagers([] if recycle_idx == no_recycles - 1 else [torch.no_grad()]): recycle_s = self.recycle_s_norm(recycle_s.detach()).to(device) recycle_z = self.recycle_z_norm(recycle_z.detach()).to(device) recycle_z += self.recycle_disto(recycle_bins.detach()).to(device) (s_s, s_z) = trunk_iter(s_s_0 + recycle_s, s_z_0 + recycle_z, residx, mask) structure = self.structure_module({'single': self.trunk2sm_s(s_s), 'pair': self.trunk2sm_z(s_z)}, true_aa, mask.float()) recycle_s = s_s recycle_z = s_z recycle_bins = EsmFoldingTrunk.distogram(structure['positions'][-1][:, :, :3], 3.375, 21.375, self.recycle_bins) structure['s_s'] = s_s structure['s_z'] = s_z return structure @staticmethod def distogram(coords, min_bin, max_bin, num_bins): boundaries = torch.linspace(min_bin, max_bin, num_bins - 1, device=coords.device) boundaries = boundaries ** 2 (N, CA, C) = [x.squeeze(-2) for x in coords.chunk(3, dim=-2)] b = CA - N c = C - CA a = b.cross(c, dim=-1) CB = -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + CA dists = (CB[..., None, :, :] - CB[..., :, None, :]).pow(2).sum(dim=-1, keepdims=True) bins = torch.sum(dists > boundaries, dim=-1) return bins @add_start_docstrings('\n ESMForProteinFolding is the HuggingFace port of the original ESMFold model. It consists of an ESM-2 "stem" followed\n by a protein folding "head", although unlike most other output heads, this "head" is similar in size and runtime to\n the rest of the model combined! 
It outputs a dictionary containing predicted structural information about the input\n protein(s).\n ', ESM_START_DOCSTRING) class EsmForProteinFolding(EsmPreTrainedModel): _no_split_modules = ['EsmFoldStructureModule', 'EsmFoldTriangularSelfAttentionBlock'] def __init__(self, config): super().__init__(config) self.config = config self.distogram_bins = 64 self.esm = EsmModel(config, add_pooling_layer=False) self.esm.requires_grad_(False) if self.config.esmfold_config.fp16_esm: self.esm.half() self.esm_feats = self.config.hidden_size self.esm_attns = self.config.num_hidden_layers * self.config.num_attention_heads self.esm_layers = self.config.num_hidden_layers self.register_buffer('af2_to_esm', self._af2_to_esm_from_vocab_list(config.vocab_list)) self.esm_s_combine = nn.Parameter(torch.zeros(self.esm_layers + 1)) trunk_config = self.config.esmfold_config.trunk c_s = trunk_config.sequence_state_dim c_z = trunk_config.pairwise_state_dim self.esm_s_mlp = nn.Sequential(LayerNorm(self.esm_feats), nn.Linear(self.esm_feats, c_s), nn.ReLU(), nn.Linear(c_s, c_s)) self.n_tokens_embed = residue_constants.restype_num + 3 self.pad_idx = 0 self.unk_idx = self.n_tokens_embed - 2 self.mask_idx = self.n_tokens_embed - 1 self.esm_dict_cls_idx = self.config.vocab_list.index('<cls>') self.esm_dict_mask_idx = self.config.vocab_list.index('<mask>') self.esm_dict_eos_idx = self.config.vocab_list.index('<eos>') self.esm_dict_padding_idx = self.config.vocab_list.index('<pad>') if self.config.esmfold_config.embed_aa: self.embedding = nn.Embedding(self.n_tokens_embed, c_s, padding_idx=0) self.trunk = EsmFoldingTrunk(trunk_config) self.distogram_head = nn.Linear(c_z, self.distogram_bins) self.ptm_head = nn.Linear(c_z, self.distogram_bins) self.lm_head = nn.Linear(c_s, self.n_tokens_embed) self.lddt_bins = 50 structure_module_config = trunk_config.structure_module self.lddt_head = nn.Sequential(nn.LayerNorm(structure_module_config.sequence_dim), nn.Linear(structure_module_config.sequence_dim, self.config.esmfold_config.lddt_head_hid_dim), nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, self.config.esmfold_config.lddt_head_hid_dim), nn.Linear(self.config.esmfold_config.lddt_head_hid_dim, 37 * self.lddt_bins)) @staticmethod def _af2_to_esm_from_vocab_list(vocab_list: List[str]) -> torch.Tensor: esm_reorder = [vocab_list.index('<pad>')] + [vocab_list.index(v) for v in residue_constants.restypes_with_x] return torch.tensor(esm_reorder) @add_start_docstrings_to_model_forward(ESMFOLD_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=EsmForProteinFoldingOutput, config_class=EsmConfig) def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, masking_pattern: Optional[torch.Tensor]=None, num_recycles: Optional[int]=None) -> EsmForProteinFoldingOutput: cfg = self.config.esmfold_config aa = input_ids B = aa.shape[0] L = aa.shape[1] device = input_ids.device if attention_mask is None: attention_mask = torch.ones_like(aa, device=device) if position_ids is None: position_ids = torch.arange(L, device=device).expand_as(input_ids) esmaa = self.af2_idx_to_esm_idx(aa, attention_mask) if masking_pattern is not None: (masked_aa, esmaa, mlm_targets) = self.bert_mask(aa, esmaa, attention_mask, masking_pattern) else: masked_aa = aa mlm_targets = None esm_s = self.compute_language_model_representations(esmaa) esm_s = esm_s.to(self.esm_s_combine.dtype) if cfg.esm_ablate_sequence: esm_s = esm_s * 0 esm_s = esm_s.detach() esm_s =
(self.esm_s_combine.softmax(0).unsqueeze(0) @ esm_s).squeeze(2) s_s_0 = self.esm_s_mlp(esm_s) s_z_0 = s_s_0.new_zeros(B, L, L, cfg.trunk.pairwise_state_dim) if self.config.esmfold_config.embed_aa: s_s_0 += self.embedding(masked_aa) structure: dict = self.trunk(s_s_0, s_z_0, aa, position_ids, attention_mask, no_recycles=num_recycles) structure = {k: v for (k, v) in structure.items() if k in ['s_z', 's_s', 'frames', 'sidechain_frames', 'unnormalized_angles', 'angles', 'positions', 'states']} if mlm_targets: structure['mlm_targets'] = mlm_targets disto_logits = self.distogram_head(structure['s_z']) disto_logits = (disto_logits + disto_logits.transpose(1, 2)) / 2 structure['distogram_logits'] = disto_logits lm_logits = self.lm_head(structure['s_s']) structure['lm_logits'] = lm_logits structure['aatype'] = aa make_atom14_masks(structure) for k in ['atom14_atom_exists', 'atom37_atom_exists']: structure[k] *= attention_mask.unsqueeze(-1) structure['residue_index'] = position_ids lddt_head = self.lddt_head(structure['states']).reshape(structure['states'].shape[0], B, L, -1, self.lddt_bins) structure['lddt_head'] = lddt_head plddt = categorical_lddt(lddt_head[-1], bins=self.lddt_bins) structure['plddt'] = plddt ptm_logits = self.ptm_head(structure['s_z']) structure['ptm_logits'] = ptm_logits structure['ptm'] = compute_tm(ptm_logits, max_bin=31, no_bins=self.distogram_bins) structure.update(compute_predicted_aligned_error(ptm_logits, max_bin=31, no_bins=self.distogram_bins)) return EsmForProteinFoldingOutput(**structure) def af2_idx_to_esm_idx(self, aa, mask): if self.af2_to_esm.device != aa.device: self.af2_to_esm = self.af2_to_esm.to(aa.device) aa = (aa + 1).masked_fill(mask != 1, 0) return self.af2_to_esm[aa] def compute_language_model_representations(self, esmaa: torch.Tensor) -> torch.Tensor: device = next(self.parameters()).device (B, L) = esmaa.shape if self.config.esmfold_config.bypass_lm: esm_s = torch.zeros(B, L, self.esm_s_combine.size[0], -1, self.esm_feats, device=device) return esm_s (bosi, eosi) = (self.esm_dict_cls_idx, self.esm_dict_eos_idx) bos = esmaa.new_full((B, 1), bosi) eos = esmaa.new_full((B, 1), self.esm_dict_padding_idx) esmaa = torch.cat([bos, esmaa, eos], dim=1) esmaa[range(B), (esmaa != 1).sum(1)] = eosi esm_hidden_states = self.esm(esmaa, attention_mask=esmaa != 1, output_hidden_states=True)['hidden_states'] esm_s = torch.stack(esm_hidden_states, dim=2) esm_s = esm_s[:, 1:-1] return esm_s def bert_mask(self, aa, esmaa, mask, pattern): new_aa = aa.clone() target = aa.clone() new_esmaa = esmaa.clone() new_aa[pattern == 1] = self.mask_idx target[pattern != 1] = 0 new_esmaa[pattern == 1] = self.esm_dict_mask_idx return (new_aa, new_esmaa, target) @torch.no_grad() def infer(self, seqs: Union[str, List[str]], position_ids=None): if isinstance(seqs, str): lst = [seqs] else: lst = seqs device = next(self.parameters()).device aatype = collate_dense_tensors([torch.from_numpy(residue_constants.sequence_to_onehot(sequence=seq, mapping=residue_constants.restype_order_with_x, map_unknown_to_x=True)).to(device).argmax(dim=1) for seq in lst]) mask = collate_dense_tensors([aatype.new_ones(len(seq)) for seq in lst]) position_ids = torch.arange(aatype.shape[1], device=device).expand(len(lst), -1) if position_ids is None else position_ids.to(device) if position_ids.ndim == 1: position_ids = position_ids.unsqueeze(0) return self.forward(aatype, mask, position_ids=position_ids) @staticmethod def output_to_pdb(output: Dict) -> List[str]: output = {k: v.to('cpu').numpy() for (k, v) in 
output.items()} pdbs = [] final_atom_positions = atom14_to_atom37(output['positions'][-1], output) final_atom_mask = output['atom37_atom_exists'] for i in range(output['aatype'].shape[0]): aa = output['aatype'][i] pred_pos = final_atom_positions[i] mask = final_atom_mask[i] resid = output['residue_index'][i] + 1 pred = OFProtein(aatype=aa, atom_positions=pred_pos, atom_mask=mask, residue_index=resid, b_factors=output['plddt'][i]) pdbs.append(to_pdb(pred)) return pdbs def infer_pdb(self, seqs, *args, **kwargs) -> str: assert isinstance(seqs, str) output = self.infer(seqs, *args, **kwargs) return self.output_to_pdb(output)[0] def infer_pdbs(self, seqs: List[str], *args, **kwargs) -> List[str]: output = self.infer(seqs, *args, **kwargs) return self.output_to_pdb(output) # File: transformers-main/src/transformers/models/esm/modeling_tf_esm.py """""" from __future__ import annotations import os from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPoolingAndCrossAttentions, TFMaskedLMOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput from ...modeling_tf_utils import TFMaskedLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, shape_list, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, stable_softmax from ...utils import logging from .configuration_esm import EsmConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'facebook/esm2_t6_8M_UR50D' _CONFIG_FOR_DOC = 'EsmConfig' def rotate_half(x): (x1, x2) = tf.split(x, 2, axis=-1) return tf.concat((-x2, x1), axis=-1) def apply_rotary_pos_emb(x, cos, sin): cos = cos[:, :, :tf.shape(x)[-2], :] sin = sin[:, :, :tf.shape(x)[-2], :] return x * cos + rotate_half(x) * sin def symmetrize(x): return x + tf.linalg.matrix_transpose(x) def average_product_correct(x): a1 = tf.reduce_sum(x, -1, keepdims=True) a2 = tf.reduce_sum(x, -2, keepdims=True) a12 = tf.reduce_sum(x, (-1, -2), keepdims=True) avg = a1 * a2 avg = avg / a12 normalized = x - avg return normalized class TFRotaryEmbedding(keras.layers.Layer): def __init__(self, dim: int, name=None): super().__init__(name=name) self.dim = dim def build(self, input_shape): super().build(input_shape) self.inv_freq = self.add_weight('inv_freq', shape=(self.dim // 2,), dtype=tf.float32, initializer=get_initializer(1.0), trainable=False) self.inv_freq.assign(1.0 / 10000 ** (tf.range(start=0, limit=self.dim, delta=2, dtype=tf.float32) / self.dim)) def _compute_cos_sin(self, x, seq_dimension=2): seq_len = tf.shape(x)[seq_dimension] t = tf.range(seq_len, dtype=self.inv_freq.dtype) freqs = tf.einsum('i, j -> ij', t, self.inv_freq) emb = tf.concat((freqs, freqs), axis=-1)[None, None, :, :] return (tf.cos(emb), tf.sin(emb)) def call(self, q: tf.Tensor, k: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: (cos_emb, sin_emb) = self._compute_cos_sin(k, seq_dimension=-2) return (apply_rotary_pos_emb(q, cos_emb, sin_emb), apply_rotary_pos_emb(k, cos_emb, sin_emb)) class TFEsmContactPredictionHead(keras.layers.Layer): def __init__(self, in_features: int, bias=True, eos_idx: int=2, name=None): super().__init__(name=name) self.eos_idx = eos_idx self.in_features = in_features self.regression = keras.layers.Dense(1, use_bias=bias, activation='sigmoid', name='regression') 
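    # Descriptive note: this head converts stacked self-attention maps into residue-residue
    # contact probabilities. In `call()` below, the EOS positions are masked out of the
    # attention maps, the first and last rows/columns (BOS/EOS) are trimmed, the per-layer and
    # per-head maps are flattened to (batch, layers * heads, seq, seq), symmetrized and
    # APC-corrected via `symmetrize` and `average_product_correct` above, and a single
    # sigmoid-activated Dense unit scores each residue pair, yielding a (batch, seq, seq)
    # contact map.
    # Illustrative shapes (hedged, assuming the facebook/esm2_t6_8M_UR50D checkpoint with
    # 6 layers and 20 heads): `attentions` enters as (batch, 6, 20, L, L) and the head
    # returns (batch, L - 2, L - 2) after trimming.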
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'regression', None) is not None: with tf.name_scope(self.regression.name): self.regression.build((None, self.in_features)) def call(self, tokens, attentions): eos_mask = tf.cast(tokens != self.eos_idx, attentions.dtype) eos_mask = tf.expand_dims(eos_mask, 1) * tf.expand_dims(eos_mask, 2) attentions = attentions * eos_mask[:, None, None, :, :] attentions = attentions[..., :-1, :-1] attentions = attentions[..., 1:, 1:] (batch_size, layers, heads, seqlen, _) = shape_list(attentions) attentions = tf.reshape(attentions, (batch_size, layers * heads, seqlen, seqlen)) attentions = average_product_correct(symmetrize(attentions)) attentions = tf.transpose(attentions, perm=(0, 2, 3, 1)) return tf.squeeze(self.regression(attentions), 3) class TFEsmEmbeddings(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.word_embeddings = keras.layers.Embedding(config.vocab_size, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name='word_embeddings') self.position_embeddings = keras.layers.Embedding(config.max_position_embeddings, config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name='position_embeddings') if config.emb_layer_norm_before: self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm') else: self.layer_norm = None self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.position_ids = tf.range(config.max_position_embeddings)[None, :] self.padding_idx = config.pad_token_id self.token_dropout = config.token_dropout self.mask_token_id = config.mask_token_id self.config = config def call(self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.word_embeddings(input_ids) embeddings = inputs_embeds if self.token_dropout: embeddings = tf.where((input_ids == self.mask_token_id)[:, :, None], 0.0, embeddings) mask_ratio_train = 0.15 * 0.8 src_lengths = tf.cast(tf.reduce_sum(attention_mask, axis=-1), tf.float32) masked_tokens = input_ids == self.mask_token_id mask_ratio_observed = tf.math.count_nonzero(masked_tokens, dtype=tf.float32, axis=-1) / src_lengths embeddings = embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None] if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings if self.layer_norm is not None: embeddings = self.layer_norm(embeddings) if attention_mask is not None: embeddings = embeddings * tf.cast(tf.expand_dims(attention_mask, -1), embeddings.dtype) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): input_shape = shape_list(inputs_embeds)[:-1] sequence_length = input_shape[1] position_ids = tf.range(start=self.padding_idx + 1, limit=sequence_length + self.padding_idx + 1, dtype=tf.int64) return tf.broadcast_to(tf.expand_dims(position_ids, 0), input_shape) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'word_embeddings', None) is not None: with 
tf.name_scope(self.word_embeddings.name): self.word_embeddings.build(None) if getattr(self, 'position_embeddings', None) is not None: with tf.name_scope(self.position_embeddings.name): self.position_embeddings.build(None) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) class TFEsmSelfAttention(keras.layers.Layer): def __init__(self, config, position_embedding_type=None, name=None): super().__init__(name=name) if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='query') self.key = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='key') self.value = keras.layers.Dense(self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name='value') self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') self.rotary_embeddings = None if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = keras.layers.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size, embeddings_initializer=get_initializer(config.initializer_range)) elif self.position_embedding_type == 'rotary': self.rotary_embeddings = TFRotaryEmbedding(dim=self.attention_head_size, name='rotary_embeddings') self.is_decoder = config.is_decoder self.config = config def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor: new_x_shape = shape_list(x)[:-1] + [self.num_attention_heads, self.attention_head_size] x = tf.reshape(x, new_x_shape) return tf.transpose(x, perm=(0, 2, 1, 3)) def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, encoder_hidden_states: tf.Tensor | None=None, encoder_attention_mask: tf.Tensor | None=None, past_key_value: Tuple[Tuple[tf.Tensor]] | None=None, output_attentions: Optional[bool]=False, training: bool=False) -> Tuple[tf.Tensor]: mixed_query_layer = self.query(hidden_states) is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = 
self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) query_layer = query_layer * self.attention_head_size ** (-0.5) if self.is_decoder: past_key_value = (key_layer, value_layer) if self.position_embedding_type == 'rotary': (query_layer, key_layer) = self.rotary_embeddings(query_layer, key_layer) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = shape_list(hidden_states)[1] position_ids_l = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), -1) position_ids_r = tf.expand_dims(tf.range(seq_length, dtype=tf.int64), 0) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = tf.cast(positional_embedding, query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = tf.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = tf.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = tf.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = stable_softmax(attention_scores, axis=-1) attention_probs = self.dropout(attention_probs, training=training) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = attention_probs @ value_layer context_layer = tf.transpose(context_layer, perm=(0, 2, 1, 3)) new_context_layer_shape = shape_list(context_layer)[:-2] + [self.all_head_size] context_layer = tf.reshape(context_layer, new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'query', None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, 'key', None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, 'value', None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) if getattr(self, 'rotary_embeddings', None) is not None: with tf.name_scope(self.rotary_embeddings.name): self.rotary_embeddings.build(None) class TFEsmSelfOutput(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states += input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, 
self.config.hidden_size]) class TFEsmAttention(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.self = TFEsmSelfAttention(config, name='self') self.output_layer = TFEsmSelfOutput(config, name='output') self.pruned_heads = set() self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.config = config def prune_heads(self, heads): raise NotImplementedError def call(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, training=False): hidden_states_ln = self.LayerNorm(hidden_states) self_outputs = self.self(hidden_states_ln, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, training) attention_output = self.output_layer(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self', None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, 'output_layer', None) is not None: with tf.name_scope(self.output_layer.name): self.output_layer.build(None) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFEsmIntermediate(keras.layers.Layer): def __init__(self, config: EsmConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = tf.nn.gelu(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFEsmOutput(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states += input_tensor return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) class TFEsmLayer(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = TFEsmAttention(config, name='attention') self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise RuntimeError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = TFEsmAttention(config) self.intermediate = TFEsmIntermediate(config, name='intermediate') self.output_layer = TFEsmOutput(config, name='output') self.LayerNorm = 
keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') self.config = config def call(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, training=False): self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, training=training) attention_output = self_attention_outputs[0] if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise AttributeError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, training=training) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layernorm_output = self.LayerNorm(attention_output) intermediate_output = self.intermediate(hidden_states=layernorm_output) layer_output = self.output_layer(hidden_states=intermediate_output, input_tensor=attention_output, training=training) outputs = (layer_output,) + outputs if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'attention', None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, 'intermediate', None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, 'output_layer', None) is not None: with tf.name_scope(self.output_layer.name): self.output_layer.build(None) if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFEsmEncoder(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.config = config self.layer = [TFEsmLayer(config, name=f'layer_._{i}') for i in range(config.num_hidden_layers)] self.emb_layer_norm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='emb_layer_norm_after') def call(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, training=False): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None 
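            # Descriptive note: each encoder layer receives its own head-mask slice and, for
            # decoder-style use, its own cached (key, value) entry from `past_key_values`;
            # the layer outputs are then folded back into `hidden_states`, the attention
            # tuples, and (when `use_cache` is set) `next_decoder_cache` below.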
past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, training) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if self.emb_layer_norm_after: hidden_states = self.emb_layer_norm_after(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return TFBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'emb_layer_norm_after', None) is not None: with tf.name_scope(self.emb_layer_norm_after.name): self.emb_layer_norm_after.build([None, None, self.config.hidden_size]) if getattr(self, 'layer', None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) class TFEsmPooler(keras.layers.Layer): def __init__(self, config: EsmConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation='tanh', name='dense') self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFEsmPreTrainedModel(TFPreTrainedModel): config_class = EsmConfig base_model_prefix = 'esm' ESM_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a Keras [Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a\n regular Keras model and refer to the TF/Keras documentation for all matters related to general usage and behavior.\n\n Parameters:\n config ([`EsmConfig`]): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' ESM_INPUTS_DOCSTRING = "\n Args:\n input_ids (`tf.Tensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`tf.Tensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare ESM Model transformer outputting raw hidden-states without any specific head on top.', ESM_START_DOCSTRING) class TFEsmMainLayer(keras.layers.Layer): _keys_to_ignore_on_load_missing = ['position_ids'] def __init__(self, config, add_pooling_layer=True, name=None, **kwargs): super().__init__(name=name, **kwargs) self.config = config self.is_decoder = config.is_decoder self.embeddings = TFEsmEmbeddings(config, name='embeddings') self.encoder = TFEsmEncoder(config, name='encoder') self.pooler = TFEsmPooler(config, name='pooler') if add_pooling_layer else None self.contact_head = TFEsmContactPredictionHead(in_features=self.config.num_hidden_layers * self.config.num_attention_heads, bias=True, name='contact_head') def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'pooler', None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) if getattr(self, 'contact_head', None) is not None: with tf.name_scope(self.contact_head.name): self.contact_head.build(None) def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.word_embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, 
use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: if not self.config.is_decoder: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape if past_key_values is None: past_key_values_length = 0 past_key_values = [None] * len(self.encoder.layer) else: past_key_values_length = shape_list(past_key_values[0][0])[-2] if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) embedding_output = self.embeddings(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, training=training) attention_mask_shape = shape_list(attention_mask) mask_seq_length = seq_length + past_key_values_length if self.is_decoder: seq_ids = tf.range(mask_seq_length) causal_mask = tf.less_equal(tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), seq_ids[None, :, None]) causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) extended_attention_mask = causal_mask * attention_mask[:, None, :] attention_mask_shape = shape_list(extended_attention_mask) extended_attention_mask = tf.reshape(extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2])) if past_key_values[0] is not None: extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] else: extended_attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])) extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) if self.is_decoder and encoder_attention_mask is not None: encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder(hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if 
self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) def predict_contacts(self, tokens, attention_mask): attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions attns = tf.stack(attns, axis=1) attention_mask = tf.cast(attention_mask, attns.dtype) attns *= attention_mask[:, None, None, None] attns *= attention_mask[:, None, None, :, None] return self.contact_head(tokens, attns) @add_start_docstrings('The bare ESM Model transformer outputting raw hidden-states without any specific head on top.', ESM_START_DOCSTRING) class TFEsmModel(TFEsmPreTrainedModel): def __init__(self, config: EsmConfig, add_pooling_layer=True, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.esm = TFEsmMainLayer(config, add_pooling_layer=add_pooling_layer, name='esm') @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: outputs = self.esm(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def predict_contacts(self, tokens, attention_mask): return self.esm.predict_contacts(tokens, attention_mask) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'esm', None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) @add_start_docstrings('ESM Model with a `language modeling` head on top.', ESM_START_DOCSTRING) class TFEsmForMaskedLM(TFEsmPreTrainedModel, TFMaskedLanguageModelingLoss): _keys_to_ignore_on_load_missing = ['position_ids'] _keys_to_ignore_on_load_unexpected = ['pooler'] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning('If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.') self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name='esm') self.lm_head = TFEsmLMHead(config, name='lm_head') if config.tie_word_embeddings: with tf.name_scope(os.path.join(self._name_scope(), 'esm', 
'embeddings', 'word_embeddings')): self.esm.embeddings.word_embeddings.build((None, None)) self.lm_head.decoder = self.esm.embeddings.word_embeddings.weights[0] def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings def get_lm_head(self): return self.lm_head @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='') def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, labels: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: masked_lm_loss = self.hf_compute_loss(labels=labels, logits=prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return TFMaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def predict_contacts(self, tokens, attention_mask): return self.esm.predict_contacts(tokens, attention_mask) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'esm', None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) if getattr(self, 'lm_head', None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) class TFEsmLMHead(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name='dense') self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm') if config.tie_word_embeddings: self.decoder = None else: self.decoder = keras.layers.Dense(config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name='decoder', use_bias=False) self.config = config def build(self, input_shape=None): if self.built: return self.built = True self.bias = self.add_weight('bias', shape=(self.config.vocab_size,), initializer='zeros', trainable=True) if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if 
getattr(self, 'decoder', None) is not None and (not self.config.tie_word_embeddings): with tf.name_scope(self.decoder.name): self.decoder.build([None, None, self.config.hidden_size]) def get_bias(self): return {'bias': self.bias} def call(self, features): x = self.dense(features) x = tf.nn.gelu(x) x = self.layer_norm(x) if self.config.tie_word_embeddings: x = tf.matmul(x, self.decoder, transpose_b=True) + self.bias else: x = self.decoder(x) + self.bias return x @add_start_docstrings('\n ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ', ESM_START_DOCSTRING) class TFEsmForSequenceClassification(TFEsmPreTrainedModel, TFSequenceClassificationLoss): _keys_to_ignore_on_load_missing = ['position_ids'] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name='esm') self.classifier = TFEsmClassificationHead(config, name='classifier') @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, labels: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'esm', None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings('\n ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', ESM_START_DOCSTRING) class TFEsmForTokenClassification(TFEsmPreTrainedModel, TFTokenClassificationLoss): _keys_to_ignore_on_load_unexpected = ['pooler'] _keys_to_ignore_on_load_missing = ['position_ids'] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.esm = TFEsmMainLayer(config, add_pooling_layer=False, name='esm') self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = keras.layers.Dense(config.num_labels, name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, labels: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'esm', None) is not None: with tf.name_scope(self.esm.name): self.esm.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) class TFEsmClassificationHead(keras.layers.Layer): def __init__(self, config, name=None): super().__init__(name=name) self.dense = keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation='tanh', name='dense') self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.out_proj = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), activation='linear', name='out_proj') self.config = config def call(self, features, training=False): x = features[:, 0, :] x = self.dropout(x, training=training) x = self.dense(x) x = self.dropout(x, training=training) x = self.out_proj(x) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, 'out_proj', None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.config.hidden_size]) def create_position_ids_from_input_ids(input_ids, padding_idx, 
past_key_values_length=0): mask = tf.cast(input_ids != padding_idx, tf.int64) incremental_indices = (tf.cumsum(mask, axis=1) + past_key_values_length) * mask return incremental_indices + padding_idx # File: transformers-main/src/transformers/models/esm/openfold_utils/chunk_utils.py import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]: shapes = [] if isinstance(tree, dict): for v in tree.values(): shapes.extend(_fetch_dims(v)) elif isinstance(tree, (list, tuple)): for t in tree: shapes.extend(_fetch_dims(t)) elif isinstance(tree, torch.Tensor): shapes.append(tree.shape) else: raise TypeError('Not supported') return shapes @torch.jit.ignore def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]: idx = [] for d in reversed(dims): idx.append(flat_idx % d) flat_idx = flat_idx // d return tuple(reversed(idx)) @torch.jit.ignore def _get_minimal_slice_set(start: Sequence[int], end: Sequence[int], dims: Sequence[int], start_edges: Optional[Sequence[bool]]=None, end_edges: Optional[Sequence[bool]]=None) -> List[Tuple[slice, ...]]: def reduce_edge_list(l: List[bool]) -> None: tally = True for i in range(len(l)): reversed_idx = -1 * (i + 1) l[reversed_idx] &= tally tally = l[reversed_idx] if start_edges is None: start_edges = [s == 0 for s in start] reduce_edge_list(start_edges) if end_edges is None: end_edges = [e == d - 1 for (e, d) in zip(end, dims)] reduce_edge_list(end_edges) if len(start) == 0: return [()] elif len(start) == 1: return [(slice(start[0], end[0] + 1),)] slices: List[Tuple[slice, ...]] = [] path_list: List[slice] = [] for (s, e) in zip(start, end): if s == e: path_list.append(slice(s, s + 1)) else: break path: Tuple[slice, ...] 
= tuple(path_list) divergence_idx = len(path) if divergence_idx == len(dims): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None sdi = start[divergence_idx] return tuple((path + (slice(sdi, sdi + 1),) + s for s in _get_minimal_slice_set(start[divergence_idx + 1:], [d - 1 for d in dims[divergence_idx + 1:]], dims[divergence_idx + 1:], start_edges=start_edges[divergence_idx + 1:], end_edges=[True for _ in end_edges[divergence_idx + 1:]]))) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None edi = end[divergence_idx] return tuple((path + (slice(edi, edi + 1),) + s for s in _get_minimal_slice_set([0 for _ in start[divergence_idx + 1:]], end[divergence_idx + 1:], dims[divergence_idx + 1:], start_edges=[True for _ in start_edges[divergence_idx + 1:]], end_edges=end_edges[divergence_idx + 1:]))) if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),)) elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),)) slices.extend(lower()) elif end_edges[divergence_idx]: slices.extend(upper()) slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),)) else: slices.extend(upper()) middle_ground = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),)) slices.extend(lower()) return slices @torch.jit.ignore def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor: batch_dims = t.shape[:no_batch_dims] start_idx = list(_flat_idx_to_idx(flat_start, batch_dims)) end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims)) slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims) sliced_tensors = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors]) def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool=False, _out: Any=None, _add_into_out: bool=False) -> Any: if not len(inputs) > 0: raise ValueError('Must provide at least one input') initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)] orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)]) def _prep_inputs(t: torch.Tensor) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims]) == no_batch_dims: t = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) t = t.reshape(-1, *t.shape[no_batch_dims:]) else: t = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) return t prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs) prepped_outputs = None if _out is not None: prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out) flat_batch_dim = 1 for d in orig_batch_dims: flat_batch_dim *= d no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(t: torch.Tensor) -> torch.Tensor: return t[i:i + chunk_size] if t.shape[0] != 1 else t i = 0 out = prepped_outputs for _ in range(no_chunks): if not low_mem: select_chunk = _select_chunk else: select_chunk = partial(_chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims)) chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs) output_chunk = layer(**chunks) if out is None: out = tensor_tree_map(lambda t: 
t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk) if isinstance(output_chunk, dict): def assign(d1: dict, d2: dict) -> None: for (k, v) in d1.items(): if isinstance(v, dict): assign(v, d2[k]) elif _add_into_out: v[i:i + chunk_size] += d2[k] else: v[i:i + chunk_size] = d2[k] assign(out, output_chunk) elif isinstance(output_chunk, tuple): for (x1, x2) in zip(out, output_chunk): if _add_into_out: x1[i:i + chunk_size] += x2 else: x1[i:i + chunk_size] = x2 elif isinstance(output_chunk, torch.Tensor): if _add_into_out: out[i:i + chunk_size] += output_chunk else: out[i:i + chunk_size] = output_chunk else: raise TypeError('Not supported') i += chunk_size out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out) return out class ChunkSizeTuner: def __init__(self, max_chunk_size: int=512): self.max_chunk_size = max_chunk_size self.cached_chunk_size: Optional[int] = None self.cached_arg_data: Optional[tuple] = None def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int: logging.info('Tuning chunk size...') if min_chunk_size >= self.max_chunk_size: return min_chunk_size candidates: List[int] = [2 ** l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)] candidates = [c for c in candidates if c > min_chunk_size] candidates = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(chunk_size: int) -> bool: try: with torch.no_grad(): fn(*args, chunk_size=chunk_size) return True except RuntimeError: return False min_viable_chunk_size_index = 0 i = len(candidates) - 1 while i > min_viable_chunk_size_index: viable = test_chunk_size(candidates[i]) if not viable: i = (min_viable_chunk_size_index + i) // 2 else: min_viable_chunk_size_index = i i = (i + len(candidates) - 1) // 2 return candidates[min_viable_chunk_size_index] def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool: consistent = True for (a1, a2) in zip(ac1, ac2): assert type(ac1) is type(ac2) if isinstance(ac1, (list, tuple)): consistent &= self._compare_arg_caches(a1, a2) elif isinstance(ac1, dict): a1_items = [v for (_, v) in sorted(a1.items(), key=lambda x: x[0])] a2_items = [v for (_, v) in sorted(a2.items(), key=lambda x: x[0])] consistent &= self._compare_arg_caches(a1_items, a2_items) else: consistent &= a1 == a2 return consistent def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int: consistent = True arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object) if self.cached_arg_data is not None: assert len(self.cached_arg_data) == len(arg_data) consistent = self._compare_arg_caches(self.cached_arg_data, arg_data) else: consistent = False if not consistent: self.cached_chunk_size = self._determine_favorable_chunk_size(representative_fn, args, min_chunk_size) self.cached_arg_data = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size # File: transformers-main/src/transformers/models/esm/openfold_utils/data_transforms.py from typing import Dict import numpy as np import torch from . 
import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: restype_atom14_to_atom37_list = [] restype_atom37_to_atom14_list = [] restype_atom14_mask_list = [] for rt in rc.restypes: atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]] restype_atom14_to_atom37_list.append([rc.atom_order[name] if name else 0 for name in atom_names]) atom_name_to_idx14 = {name: i for (i, name) in enumerate(atom_names)} restype_atom37_to_atom14_list.append([atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0 for name in rc.atom_types]) restype_atom14_mask_list.append([1.0 if name else 0.0 for name in atom_names]) restype_atom14_to_atom37_list.append([0] * 14) restype_atom37_to_atom14_list.append([0] * 37) restype_atom14_mask_list.append([0.0] * 14) restype_atom14_to_atom37 = torch.tensor(restype_atom14_to_atom37_list, dtype=torch.int32, device=protein['aatype'].device) restype_atom37_to_atom14 = torch.tensor(restype_atom37_to_atom14_list, dtype=torch.int32, device=protein['aatype'].device) restype_atom14_mask = torch.tensor(restype_atom14_mask_list, dtype=torch.float32, device=protein['aatype'].device) protein_aatype = protein['aatype'].to(torch.long) residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype] residx_atom14_mask = restype_atom14_mask[protein_aatype] protein['atom14_atom_exists'] = residx_atom14_mask protein['residx_atom14_to_atom37'] = residx_atom14_to_atom37.long() residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype] protein['residx_atom37_to_atom14'] = residx_atom37_to_atom14.long() restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein['aatype'].device) for (restype, restype_letter) in enumerate(rc.restypes): restype_name = rc.restype_1to3[restype_letter] atom_names = rc.residue_atoms[restype_name] for atom_name in atom_names: atom_type = rc.atom_order[atom_name] restype_atom37_mask[restype, atom_type] = 1 residx_atom37_mask = restype_atom37_mask[protein_aatype] protein['atom37_atom_exists'] = residx_atom37_mask return protein def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]: batch = tree_map(lambda n: torch.tensor(n, device=batch['aatype'].device), batch, np.ndarray) out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch)) return out # File: transformers-main/src/transformers/models/esm/openfold_utils/feats.py from typing import Dict, Tuple, overload import torch import torch.types from torch import nn from . import residue_constants as rc from .rigid_utils import Rigid, Rotation from .tensor_utils import batched_gather @overload def pseudo_beta_fn(aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: None) -> torch.Tensor: ... @overload def pseudo_beta_fn(aatype: torch.Tensor, all_atom_positions: torch.Tensor, all_atom_masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: ... 
def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks): is_gly = aatype == rc.restype_order['G'] ca_idx = rc.atom_order['CA'] cb_idx = rc.atom_order['CB'] pseudo_beta = torch.where(is_gly[..., None].expand(*(-1,) * len(is_gly.shape), 3), all_atom_positions[..., ca_idx, :], all_atom_positions[..., cb_idx, :]) if all_atom_masks is not None: pseudo_beta_mask = torch.where(is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx]) return (pseudo_beta, pseudo_beta_mask) else: return pseudo_beta def atom14_to_atom37(atom14: torch.Tensor, batch: Dict[str, torch.Tensor]) -> torch.Tensor: atom37_data = batched_gather(atom14, batch['residx_atom37_to_atom14'], dim=-2, no_batch_dims=len(atom14.shape[:-2])) atom37_data = atom37_data * batch['atom37_atom_exists'][..., None] return atom37_data def build_template_angle_feat(template_feats: Dict[str, torch.Tensor]) -> torch.Tensor: template_aatype = template_feats['template_aatype'] torsion_angles_sin_cos = template_feats['template_torsion_angles_sin_cos'] alt_torsion_angles_sin_cos = template_feats['template_alt_torsion_angles_sin_cos'] torsion_angles_mask = template_feats['template_torsion_angles_mask'] template_angle_feat = torch.cat([nn.functional.one_hot(template_aatype, 22), torsion_angles_sin_cos.reshape(*torsion_angles_sin_cos.shape[:-2], 14), alt_torsion_angles_sin_cos.reshape(*alt_torsion_angles_sin_cos.shape[:-2], 14), torsion_angles_mask], dim=-1) return template_angle_feat def build_template_pair_feat(batch: Dict[str, torch.Tensor], min_bin: torch.types.Number, max_bin: torch.types.Number, no_bins: int, use_unit_vector: bool=False, eps: float=1e-20, inf: float=100000000.0) -> torch.Tensor: template_mask = batch['template_pseudo_beta_mask'] template_mask_2d = template_mask[..., None] * template_mask[..., None, :] tpb = batch['template_pseudo_beta'] dgram = torch.sum((tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True) lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2 upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1) dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype) to_concat = [dgram, template_mask_2d[..., None]] aatype_one_hot: torch.LongTensor = nn.functional.one_hot(batch['template_aatype'], rc.restype_num + 2) n_res = batch['template_aatype'].shape[-1] to_concat.append(aatype_one_hot[..., None, :, :].expand(*aatype_one_hot.shape[:-2], n_res, -1, -1)) to_concat.append(aatype_one_hot[..., None, :].expand(*aatype_one_hot.shape[:-2], -1, n_res, -1)) (n, ca, c) = [rc.atom_order[a] for a in ['N', 'CA', 'C']] rigids = Rigid.make_transform_from_reference(n_xyz=batch['template_all_atom_positions'][..., n, :], ca_xyz=batch['template_all_atom_positions'][..., ca, :], c_xyz=batch['template_all_atom_positions'][..., c, :], eps=eps) points = rigids.get_trans()[..., None, :, :] rigid_vec = rigids[..., None].invert_apply(points) inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec ** 2, dim=-1)) t_aa_masks = batch['template_all_atom_mask'] template_mask = t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c] template_mask_2d = template_mask[..., None] * template_mask[..., None, :] inv_distance_scalar = inv_distance_scalar * template_mask_2d unit_vector = rigid_vec * inv_distance_scalar[..., None] if not use_unit_vector: unit_vector = unit_vector * 0.0 to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1)) to_concat.append(template_mask_2d[..., None]) act = torch.cat(to_concat, dim=-1) act = act * template_mask_2d[..., None] return act def 
build_extra_msa_feat(batch: Dict[str, torch.Tensor]) -> torch.Tensor: msa_1hot: torch.LongTensor = nn.functional.one_hot(batch['extra_msa'], 23) msa_feat = [msa_1hot, batch['extra_has_deletion'].unsqueeze(-1), batch['extra_deletion_value'].unsqueeze(-1)] return torch.cat(msa_feat, dim=-1) def torsion_angles_to_frames(r: Rigid, alpha: torch.Tensor, aatype: torch.Tensor, rrgdf: torch.Tensor) -> Rigid: default_4x4 = rrgdf[aatype, ...] default_r = r.from_tensor_4x4(default_4x4) bb_rot = alpha.new_zeros((*(1,) * len(alpha.shape[:-1]), 2)) bb_rot[..., 1] = 1 alpha = torch.cat([bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2) all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape) all_rots[..., 0, 0] = 1 all_rots[..., 1, 1] = alpha[..., 1] all_rots[..., 1, 2] = -alpha[..., 0] all_rots[..., 2, 1:] = alpha all_frames = default_r.compose(Rigid(Rotation(rot_mats=all_rots), None)) chi2_frame_to_frame = all_frames[..., 5] chi3_frame_to_frame = all_frames[..., 6] chi4_frame_to_frame = all_frames[..., 7] chi1_frame_to_bb = all_frames[..., 4] chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame) chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame) chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame) all_frames_to_bb = Rigid.cat([all_frames[..., :5], chi2_frame_to_bb.unsqueeze(-1), chi3_frame_to_bb.unsqueeze(-1), chi4_frame_to_bb.unsqueeze(-1)], dim=-1) all_frames_to_global = r[..., None].compose(all_frames_to_bb) return all_frames_to_global def frames_and_literature_positions_to_atom14_pos(r: Rigid, aatype: torch.Tensor, default_frames: torch.Tensor, group_idx: torch.Tensor, atom_mask: torch.Tensor, lit_positions: torch.Tensor) -> torch.Tensor: group_mask = group_idx[aatype, ...] group_mask_one_hot: torch.LongTensor = nn.functional.one_hot(group_mask, num_classes=default_frames.shape[-3]) t_atoms_to_global = r[..., None, :] * group_mask_one_hot t_atoms_to_global = t_atoms_to_global.map_tensor_fn(lambda x: torch.sum(x, dim=-1)) atom_mask = atom_mask[aatype, ...].unsqueeze(-1) lit_positions = lit_positions[aatype, ...] 
pred_positions = t_atoms_to_global.apply(lit_positions) pred_positions = pred_positions * atom_mask return pred_positions # File: transformers-main/src/transformers/models/esm/openfold_utils/loss.py from typing import Dict, Optional, Tuple import torch def _calculate_bin_centers(boundaries: torch.Tensor) -> torch.Tensor: step = boundaries[1] - boundaries[0] bin_centers = boundaries + step / 2 bin_centers = torch.cat([bin_centers, (bin_centers[-1] + step).unsqueeze(-1)], dim=0) return bin_centers def _calculate_expected_aligned_error(alignment_confidence_breaks: torch.Tensor, aligned_distance_error_probs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: bin_centers = _calculate_bin_centers(alignment_confidence_breaks) return (torch.sum(aligned_distance_error_probs * bin_centers, dim=-1), bin_centers[-1]) def compute_predicted_aligned_error(logits: torch.Tensor, max_bin: int=31, no_bins: int=64, **kwargs) -> Dict[str, torch.Tensor]: boundaries = torch.linspace(0, max_bin, steps=no_bins - 1, device=logits.device) aligned_confidence_probs = torch.nn.functional.softmax(logits, dim=-1) (predicted_aligned_error, max_predicted_aligned_error) = _calculate_expected_aligned_error(alignment_confidence_breaks=boundaries, aligned_distance_error_probs=aligned_confidence_probs) return {'aligned_confidence_probs': aligned_confidence_probs, 'predicted_aligned_error': predicted_aligned_error, 'max_predicted_aligned_error': max_predicted_aligned_error} def compute_tm(logits: torch.Tensor, residue_weights: Optional[torch.Tensor]=None, max_bin: int=31, no_bins: int=64, eps: float=1e-08, **kwargs) -> torch.Tensor: if residue_weights is None: residue_weights = logits.new_ones(logits.shape[-2]) boundaries = torch.linspace(0, max_bin, steps=no_bins - 1, device=logits.device) bin_centers = _calculate_bin_centers(boundaries) torch.sum(residue_weights) n = logits.shape[-2] clipped_n = max(n, 19) d0 = 1.24 * (clipped_n - 15) ** (1.0 / 3) - 1.8 probs = torch.nn.functional.softmax(logits, dim=-1) tm_per_bin = 1.0 / (1 + bin_centers ** 2 / d0 ** 2) predicted_tm_term = torch.sum(probs * tm_per_bin, dim=-1) normed_residue_mask = residue_weights / (eps + residue_weights.sum()) per_alignment = torch.sum(predicted_tm_term * normed_residue_mask, dim=-1) weighted = per_alignment * residue_weights argmax = (weighted == torch.max(weighted)).nonzero()[0] return per_alignment[tuple(argmax)] # File: transformers-main/src/transformers/models/esm/openfold_utils/protein.py """""" import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . 
import residue_constants FeatureDict = Mapping[str, np.ndarray] ModelOutput = Mapping[str, Any] PICO_TO_ANGSTROM = 0.01 @dataclasses.dataclass(frozen=True) class Protein: atom_positions: np.ndarray aatype: np.ndarray atom_mask: np.ndarray residue_index: np.ndarray b_factors: np.ndarray chain_index: Optional[np.ndarray] = None remark: Optional[str] = None parents: Optional[Sequence[str]] = None parents_chain_index: Optional[Sequence[int]] = None def from_proteinnet_string(proteinnet_str: str) -> Protein: tag_re = '(\\[[A-Z]+\\]\\n)' tags: List[str] = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0] groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split('\n') for l in tags[1::2]]) atoms: List[str] = ['N', 'CA', 'C'] aatype = None atom_positions = None atom_mask = None for g in groups: if '[PRIMARY]' == g[0]: seq = g[1][0].strip() for i in range(len(seq)): if seq[i] not in residue_constants.restypes: seq[i] = 'X' aatype = np.array([residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]) elif '[TERTIARY]' == g[0]: tertiary: List[List[float]] = [] for axis in range(3): tertiary.append(list(map(float, g[1][axis].split()))) tertiary_np = np.array(tertiary) atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32) for (i, atom) in enumerate(atoms): atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3]) atom_positions *= PICO_TO_ANGSTROM elif '[MASK]' == g[0]: mask = np.array(list(map({'-': 0, '+': 1}.get, g[1][0].strip()))) atom_mask = np.zeros((len(mask), residue_constants.atom_type_num)).astype(np.float32) for (i, atom) in enumerate(atoms): atom_mask[:, residue_constants.atom_order[atom]] = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein(atom_positions=atom_positions, atom_mask=atom_mask, aatype=aatype, residue_index=np.arange(len(aatype)), b_factors=None) def get_pdb_headers(prot: Protein, chain_id: int=0) -> List[str]: pdb_headers: List[str] = [] remark = prot.remark if remark is not None: pdb_headers.append(f'REMARK {remark}') parents = prot.parents parents_chain_index = prot.parents_chain_index if parents is not None and parents_chain_index is not None: parents = [p for (i, p) in zip(parents_chain_index, parents) if i == chain_id] if parents is None or len(parents) == 0: parents = ['N/A'] pdb_headers.append(f"PARENT {' '.join(parents)}") return pdb_headers def add_pdb_headers(prot: Protein, pdb_str: str) -> str: out_pdb_lines: List[str] = [] lines = pdb_str.split('\n') remark = prot.remark if remark is not None: out_pdb_lines.append(f'REMARK {remark}') parents_per_chain: List[List[str]] if prot.parents is not None and len(prot.parents) > 0: parents_per_chain = [] if prot.parents_chain_index is not None: parent_dict: Dict[str, List[str]] = {} for (p, i) in zip(prot.parents, prot.parents_chain_index): parent_dict.setdefault(str(i), []) parent_dict[str(i)].append(p) max_idx = max([int(chain_idx) for chain_idx in parent_dict]) for i in range(max_idx + 1): chain_parents = parent_dict.get(str(i), ['N/A']) parents_per_chain.append(chain_parents) else: parents_per_chain.append(list(prot.parents)) else: parents_per_chain = [['N/A']] def make_parent_line(p: Sequence[str]) -> str: return f"PARENT {' '.join(p)}" out_pdb_lines.append(make_parent_line(parents_per_chain[0])) chain_counter = 0 for (i, l) in enumerate(lines): if 'PARENT' not in l and 'REMARK' not in l: out_pdb_lines.append(l) if 'TER' in l and 'END' 
not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(parents_per_chain): chain_parents = parents_per_chain[chain_counter] else: chain_parents = ['N/A'] out_pdb_lines.append(make_parent_line(chain_parents)) return '\n'.join(out_pdb_lines) def to_pdb(prot: Protein) -> str: restypes = residue_constants.restypes + ['X'] def res_1to3(r: int) -> str: return residue_constants.restype_1to3.get(restypes[r], 'UNK') atom_types = residue_constants.atom_types pdb_lines: List[str] = [] atom_mask = prot.atom_mask aatype = prot.aatype atom_positions = prot.atom_positions residue_index = prot.residue_index.astype(np.int32) b_factors = prot.b_factors chain_index = prot.chain_index if np.any(aatype > residue_constants.restype_num): raise ValueError('Invalid aatypes.') headers = get_pdb_headers(prot) if len(headers) > 0: pdb_lines.extend(headers) n = aatype.shape[0] atom_index = 1 prev_chain_index = 0 chain_tags = string.ascii_uppercase chain_tag = None for i in range(n): res_name_3 = res_1to3(aatype[i]) for (atom_name, pos, mask, b_factor) in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]): if mask < 0.5: continue record_type = 'ATOM' name = atom_name if len(atom_name) == 4 else f' {atom_name}' alt_loc = '' insertion_code = '' occupancy = 1.0 element = atom_name[0] charge = '' chain_tag = 'A' if chain_index is not None: chain_tag = chain_tags[chain_index[i]] atom_line = f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}{res_name_3:>3} {chain_tag:>1}{residue_index[i]:>4}{insertion_code:>1} {pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}{occupancy:>6.2f}{b_factor:>6.2f} {element:>2}{charge:>2}' pdb_lines.append(atom_line) atom_index += 1 should_terminate = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: should_terminate = True prev_chain_index = chain_index[i + 1] if should_terminate: chain_end = 'TER' chain_termination_line = f'{chain_end:<6}{atom_index:>5} {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}' pdb_lines.append(chain_termination_line) atom_index += 1 if i != n - 1: pdb_lines.extend(get_pdb_headers(prot, prev_chain_index)) pdb_lines.append('END') pdb_lines.append('') return '\n'.join(pdb_lines) def ideal_atom_mask(prot: Protein) -> np.ndarray: return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray]=None, chain_index: Optional[np.ndarray]=None, remark: Optional[str]=None, parents: Optional[Sequence[str]]=None, parents_chain_index: Optional[Sequence[int]]=None) -> Protein: return Protein(aatype=features['aatype'], atom_positions=result['final_atom_positions'], atom_mask=result['final_atom_mask'], residue_index=features['residue_index'] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask']), chain_index=chain_index, remark=remark, parents=parents, parents_chain_index=parents_chain_index) # File: transformers-main/src/transformers/models/esm/openfold_utils/residue_constants.py """""" import collections import copy import functools from importlib import resources from typing import Dict, List, Mapping, Sequence, Tuple import numpy as np ca_ca = 3.80209737096 chi_angles_atoms: Dict[str, List[List[str]]] = {'ALA': [], 'ARG': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'], ['CB', 'CG', 'CD', 'NE'], ['CG', 'CD', 'NE', 'CZ']], 'ASN': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'OD1']], 'ASP': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'OD1']], 'CYS': [['N', 'CA', 'CB', 'SG']], 
'GLN': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'], ['CB', 'CG', 'CD', 'OE1']], 'GLU': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'], ['CB', 'CG', 'CD', 'OE1']], 'GLY': [], 'HIS': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'ND1']], 'ILE': [['N', 'CA', 'CB', 'CG1'], ['CA', 'CB', 'CG1', 'CD1']], 'LEU': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']], 'LYS': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD'], ['CB', 'CG', 'CD', 'CE'], ['CG', 'CD', 'CE', 'NZ']], 'MET': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'SD'], ['CB', 'CG', 'SD', 'CE']], 'PHE': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']], 'PRO': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD']], 'SER': [['N', 'CA', 'CB', 'OG']], 'THR': [['N', 'CA', 'CB', 'OG1']], 'TRP': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']], 'TYR': [['N', 'CA', 'CB', 'CG'], ['CA', 'CB', 'CG', 'CD1']], 'VAL': [['N', 'CA', 'CB', 'CG1']]} chi_angles_mask: List[List[float]] = [[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]] chi_pi_periodic: List[List[float]] = [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]] rigid_group_atom_positions: Dict[str, List[Tuple[str, int, Tuple[float, float, float]]]] = {'ALA': [('N', 0, (-0.525, 1.363, 0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.526, -0.0, -0.0)), ('CB', 0, (-0.529, -0.774, -1.205)), ('O', 3, (0.627, 1.062, 0.0))], 'ARG': [('N', 0, (-0.524, 1.362, -0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.525, -0.0, -0.0)), ('CB', 0, (-0.524, -0.778, -1.209)), ('O', 3, (0.626, 1.062, 0.0)), ('CG', 4, (0.616, 1.39, -0.0)), ('CD', 5, (0.564, 1.414, 0.0)), ('NE', 6, (0.539, 1.357, -0.0)), ('NH1', 7, (0.206, 2.301, 0.0)), ('NH2', 7, (2.078, 0.978, -0.0)), ('CZ', 7, (0.758, 1.093, -0.0))], 'ASN': [('N', 0, (-0.536, 1.357, 0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.526, -0.0, -0.0)), ('CB', 0, (-0.531, -0.787, -1.2)), ('O', 3, (0.625, 1.062, 0.0)), ('CG', 4, (0.584, 1.399, 0.0)), ('ND2', 5, (0.593, -1.188, 0.001)), ('OD1', 5, (0.633, 1.059, 0.0))], 'ASP': [('N', 0, (-0.525, 1.362, -0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.527, 0.0, -0.0)), ('CB', 0, (-0.526, -0.778, -1.208)), ('O', 3, (0.626, 1.062, -0.0)), ('CG', 4, (0.593, 1.398, -0.0)), ('OD1', 5, (0.61, 1.091, 0.0)), ('OD2', 5, (0.592, -1.101, -0.003))], 'CYS': [('N', 0, (-0.522, 1.362, -0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.524, 0.0, 0.0)), ('CB', 0, (-0.519, -0.773, -1.212)), ('O', 3, (0.625, 1.062, -0.0)), ('SG', 4, (0.728, 1.653, 0.0))], 'GLN': [('N', 0, (-0.526, 1.361, -0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.526, 0.0, 0.0)), ('CB', 0, (-0.525, -0.779, -1.207)), ('O', 3, (0.626, 1.062, -0.0)), ('CG', 4, (0.615, 1.393, 0.0)), ('CD', 5, (0.587, 1.399, -0.0)), ('NE2', 6, (0.593, -1.189, -0.001)), ('OE1', 6, (0.634, 1.06, 
0.0))], 'GLU': [('N', 0, (-0.528, 1.361, 0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.526, -0.0, -0.0)), ('CB', 0, (-0.526, -0.781, -1.207)), ('O', 3, (0.626, 1.062, 0.0)), ('CG', 4, (0.615, 1.392, 0.0)), ('CD', 5, (0.6, 1.397, 0.0)), ('OE1', 6, (0.607, 1.095, -0.0)), ('OE2', 6, (0.589, -1.104, -0.001))], 'GLY': [('N', 0, (-0.572, 1.337, 0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.517, -0.0, -0.0)), ('O', 3, (0.626, 1.062, -0.0))], 'HIS': [('N', 0, (-0.527, 1.36, 0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.525, 0.0, 0.0)), ('CB', 0, (-0.525, -0.778, -1.208)), ('O', 3, (0.625, 1.063, 0.0)), ('CG', 4, (0.6, 1.37, -0.0)), ('CD2', 5, (0.889, -1.021, 0.003)), ('ND1', 5, (0.744, 1.16, -0.0)), ('CE1', 5, (2.03, 0.851, 0.002)), ('NE2', 5, (2.145, -0.466, 0.004))], 'ILE': [('N', 0, (-0.493, 1.373, -0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.527, -0.0, -0.0)), ('CB', 0, (-0.536, -0.793, -1.213)), ('O', 3, (0.627, 1.062, -0.0)), ('CG1', 4, (0.534, 1.437, -0.0)), ('CG2', 4, (0.54, -0.785, -1.199)), ('CD1', 5, (0.619, 1.391, 0.0))], 'LEU': [('N', 0, (-0.52, 1.363, 0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.525, -0.0, -0.0)), ('CB', 0, (-0.522, -0.773, -1.214)), ('O', 3, (0.625, 1.063, -0.0)), ('CG', 4, (0.678, 1.371, 0.0)), ('CD1', 5, (0.53, 1.43, -0.0)), ('CD2', 5, (0.535, -0.774, 1.2))], 'LYS': [('N', 0, (-0.526, 1.362, -0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.526, 0.0, 0.0)), ('CB', 0, (-0.524, -0.778, -1.208)), ('O', 3, (0.626, 1.062, -0.0)), ('CG', 4, (0.619, 1.39, 0.0)), ('CD', 5, (0.559, 1.417, 0.0)), ('CE', 6, (0.56, 1.416, 0.0)), ('NZ', 7, (0.554, 1.387, 0.0))], 'MET': [('N', 0, (-0.521, 1.364, -0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.525, 0.0, 0.0)), ('CB', 0, (-0.523, -0.776, -1.21)), ('O', 3, (0.625, 1.062, -0.0)), ('CG', 4, (0.613, 1.391, -0.0)), ('SD', 5, (0.703, 1.695, 0.0)), ('CE', 6, (0.32, 1.786, -0.0))], 'PHE': [('N', 0, (-0.518, 1.363, 0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.524, 0.0, -0.0)), ('CB', 0, (-0.525, -0.776, -1.212)), ('O', 3, (0.626, 1.062, -0.0)), ('CG', 4, (0.607, 1.377, 0.0)), ('CD1', 5, (0.709, 1.195, -0.0)), ('CD2', 5, (0.706, -1.196, 0.0)), ('CE1', 5, (2.102, 1.198, -0.0)), ('CE2', 5, (2.098, -1.201, -0.0)), ('CZ', 5, (2.794, -0.003, -0.001))], 'PRO': [('N', 0, (-0.566, 1.351, -0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.527, -0.0, 0.0)), ('CB', 0, (-0.546, -0.611, -1.293)), ('O', 3, (0.621, 1.066, 0.0)), ('CG', 4, (0.382, 1.445, 0.0)), ('CD', 5, (0.477, 1.424, 0.0))], 'SER': [('N', 0, (-0.529, 1.36, -0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.525, -0.0, -0.0)), ('CB', 0, (-0.518, -0.777, -1.211)), ('O', 3, (0.626, 1.062, -0.0)), ('OG', 4, (0.503, 1.325, 0.0))], 'THR': [('N', 0, (-0.517, 1.364, 0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.526, 0.0, -0.0)), ('CB', 0, (-0.516, -0.793, -1.215)), ('O', 3, (0.626, 1.062, 0.0)), ('CG2', 4, (0.55, -0.718, -1.228)), ('OG1', 4, (0.472, 1.353, 0.0))], 'TRP': [('N', 0, (-0.521, 1.363, 0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.525, -0.0, 0.0)), ('CB', 0, (-0.523, -0.776, -1.212)), ('O', 3, (0.627, 1.062, 0.0)), ('CG', 4, (0.609, 1.37, -0.0)), ('CD1', 5, (0.824, 1.091, 0.0)), ('CD2', 5, (0.854, -1.148, -0.005)), ('CE2', 5, (2.186, -0.678, -0.007)), ('CE3', 5, (0.622, -2.53, -0.007)), ('NE1', 5, (2.14, 0.69, -0.004)), ('CH2', 5, (3.028, -2.89, -0.013)), ('CZ2', 5, (3.283, -1.543, -0.011)), ('CZ3', 5, (1.715, -3.389, -0.011))], 'TYR': [('N', 0, (-0.522, 1.362, 0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.524, -0.0, -0.0)), ('CB', 0, (-0.522, -0.776, -1.213)), ('O', 3, (0.627, 
1.062, -0.0)), ('CG', 4, (0.607, 1.382, -0.0)), ('CD1', 5, (0.716, 1.195, -0.0)), ('CD2', 5, (0.713, -1.194, -0.001)), ('CE1', 5, (2.107, 1.2, -0.002)), ('CE2', 5, (2.104, -1.201, -0.003)), ('OH', 5, (4.168, -0.002, -0.005)), ('CZ', 5, (2.791, -0.001, -0.003))], 'VAL': [('N', 0, (-0.494, 1.373, -0.0)), ('CA', 0, (0.0, 0.0, 0.0)), ('C', 0, (1.527, -0.0, -0.0)), ('CB', 0, (-0.533, -0.795, -1.213)), ('O', 3, (0.627, 1.062, -0.0)), ('CG1', 4, (0.54, 1.429, -0.0)), ('CG2', 4, (0.533, -0.776, 1.203))]} residue_atoms: Dict[str, List[str]] = {'ALA': ['C', 'CA', 'CB', 'N', 'O'], 'ARG': ['C', 'CA', 'CB', 'CG', 'CD', 'CZ', 'N', 'NE', 'O', 'NH1', 'NH2'], 'ASP': ['C', 'CA', 'CB', 'CG', 'N', 'O', 'OD1', 'OD2'], 'ASN': ['C', 'CA', 'CB', 'CG', 'N', 'ND2', 'O', 'OD1'], 'CYS': ['C', 'CA', 'CB', 'N', 'O', 'SG'], 'GLU': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'O', 'OE1', 'OE2'], 'GLN': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'NE2', 'O', 'OE1'], 'GLY': ['C', 'CA', 'N', 'O'], 'HIS': ['C', 'CA', 'CB', 'CG', 'CD2', 'CE1', 'N', 'ND1', 'NE2', 'O'], 'ILE': ['C', 'CA', 'CB', 'CG1', 'CG2', 'CD1', 'N', 'O'], 'LEU': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'N', 'O'], 'LYS': ['C', 'CA', 'CB', 'CG', 'CD', 'CE', 'N', 'NZ', 'O'], 'MET': ['C', 'CA', 'CB', 'CG', 'CE', 'N', 'O', 'SD'], 'PHE': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'N', 'O'], 'PRO': ['C', 'CA', 'CB', 'CG', 'CD', 'N', 'O'], 'SER': ['C', 'CA', 'CB', 'N', 'O', 'OG'], 'THR': ['C', 'CA', 'CB', 'CG2', 'N', 'O', 'OG1'], 'TRP': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE2', 'CE3', 'CZ2', 'CZ3', 'CH2', 'N', 'NE1', 'O'], 'TYR': ['C', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'N', 'O', 'OH'], 'VAL': ['C', 'CA', 'CB', 'CG1', 'CG2', 'N', 'O']} residue_atom_renaming_swaps: Dict[str, Dict[str, str]] = {'ASP': {'OD1': 'OD2'}, 'GLU': {'OE1': 'OE2'}, 'PHE': {'CD1': 'CD2', 'CE1': 'CE2'}, 'TYR': {'CD1': 'CD2', 'CE1': 'CE2'}} van_der_waals_radius: Dict[str, float] = {'C': 1.7, 'N': 1.55, 'O': 1.52, 'S': 1.8} Bond = collections.namedtuple('Bond', ['atom1_name', 'atom2_name', 'length', 'stddev']) BondAngle = collections.namedtuple('BondAngle', ['atom1_name', 'atom2_name', 'atom3name', 'angle_rad', 'stddev']) def map_structure_with_atom_order(in_list: list, first_call: bool=True) -> list: if first_call: in_list = copy.deepcopy(in_list) for i in range(len(in_list)): if isinstance(in_list[i], list): in_list[i] = map_structure_with_atom_order(in_list[i], first_call=False) elif isinstance(in_list[i], str): in_list[i] = atom_order[in_list[i]] else: raise TypeError('Unexpected type when mapping nested lists!') return in_list @functools.lru_cache(maxsize=None) def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]], Mapping[str, List[Bond]], Mapping[str, List[BondAngle]]]: stereo_chemical_props = resources.read_text('openfold.resources', 'stereo_chemical_props.txt') lines_iter = iter(stereo_chemical_props.splitlines()) residue_bonds: Dict[str, List[Bond]] = {} next(lines_iter) for line in lines_iter: if line.strip() == '-': break (bond, resname, bond_length, stddev) = line.split() (atom1, atom2) = bond.split('-') if resname not in residue_bonds: residue_bonds[resname] = [] residue_bonds[resname].append(Bond(atom1, atom2, float(bond_length), float(stddev))) residue_bonds['UNK'] = [] residue_bond_angles: Dict[str, List[BondAngle]] = {} next(lines_iter) next(lines_iter) for line in lines_iter: if line.strip() == '-': break (bond, resname, angle_degree, stddev_degree) = line.split() (atom1, atom2, atom3) = bond.split('-') if resname not in residue_bond_angles: 
residue_bond_angles[resname] = [] residue_bond_angles[resname].append(BondAngle(atom1, atom2, atom3, float(angle_degree) / 180.0 * np.pi, float(stddev_degree) / 180.0 * np.pi)) residue_bond_angles['UNK'] = [] def make_bond_key(atom1_name: str, atom2_name: str) -> str: return '-'.join(sorted([atom1_name, atom2_name])) residue_virtual_bonds: Dict[str, List[Bond]] = {} for (resname, bond_angles) in residue_bond_angles.items(): bond_cache: Dict[str, Bond] = {} for b in residue_bonds[resname]: bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b residue_virtual_bonds[resname] = [] for ba in bond_angles: bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)] bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)] gamma = ba.angle_rad length = np.sqrt(bond1.length ** 2 + bond2.length ** 2 - 2 * bond1.length * bond2.length * np.cos(gamma)) dl_outer = 0.5 / length dl_dgamma = 2 * bond1.length * bond2.length * np.sin(gamma) * dl_outer dl_db1 = (2 * bond1.length - 2 * bond2.length * np.cos(gamma)) * dl_outer dl_db2 = (2 * bond2.length - 2 * bond1.length * np.cos(gamma)) * dl_outer stddev = np.sqrt((dl_dgamma * ba.stddev) ** 2 + (dl_db1 * bond1.stddev) ** 2 + (dl_db2 * bond2.stddev) ** 2) residue_virtual_bonds[resname].append(Bond(ba.atom1_name, ba.atom3name, length, stddev)) return (residue_bonds, residue_virtual_bonds, residue_bond_angles) between_res_bond_length_c_n: Tuple[float, float] = (1.329, 1.341) between_res_bond_length_stddev_c_n: Tuple[float, float] = (0.014, 0.016) between_res_cos_angles_c_n_ca: Tuple[float, float] = (-0.5203, 0.0353) between_res_cos_angles_ca_c_n: Tuple[float, float] = (-0.4473, 0.0311) atom_types: List[str] = ['N', 'CA', 'C', 'CB', 'O', 'CG', 'CG1', 'CG2', 'OG', 'OG1', 'SG', 'CD', 'CD1', 'CD2', 'ND1', 'ND2', 'OD1', 'OD2', 'SD', 'CE', 'CE1', 'CE2', 'CE3', 'NE', 'NE1', 'NE2', 'OE1', 'OE2', 'CH2', 'NH1', 'NH2', 'OH', 'CZ', 'CZ2', 'CZ3', 'NZ', 'OXT'] atom_order: Dict[str, int] = {atom_type: i for (i, atom_type) in enumerate(atom_types)} atom_type_num = len(atom_types) restype_name_to_atom14_names: Dict[str, List[str]] = {'ALA': ['N', 'CA', 'C', 'O', 'CB', '', '', '', '', '', '', '', '', ''], 'ARG': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2', '', '', ''], 'ASN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'ND2', '', '', '', '', '', ''], 'ASP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'OD1', 'OD2', '', '', '', '', '', ''], 'CYS': ['N', 'CA', 'C', 'O', 'CB', 'SG', '', '', '', '', '', '', '', ''], 'GLN': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'NE2', '', '', '', '', ''], 'GLU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'OE1', 'OE2', '', '', '', '', ''], 'GLY': ['N', 'CA', 'C', 'O', '', '', '', '', '', '', '', '', '', ''], 'HIS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'ND1', 'CD2', 'CE1', 'NE2', '', '', '', ''], 'ILE': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', 'CD1', '', '', '', '', '', ''], 'LEU': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', '', '', '', '', '', ''], 'LYS': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', 'CE', 'NZ', '', '', '', '', ''], 'MET': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'SD', 'CE', '', '', '', '', '', ''], 'PHE': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', '', '', ''], 'PRO': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD', '', '', '', '', '', '', ''], 'SER': ['N', 'CA', 'C', 'O', 'CB', 'OG', '', '', '', '', '', '', '', ''], 'THR': ['N', 'CA', 'C', 'O', 'CB', 'OG1', 'CG2', '', '', '', '', '', '', ''], 'TRP': ['N', 'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'NE1', 'CE2', 'CE3', 'CZ2', 'CZ3', 'CH2'], 'TYR': ['N', 
'CA', 'C', 'O', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'OH', '', ''], 'VAL': ['N', 'CA', 'C', 'O', 'CB', 'CG1', 'CG2', '', '', '', '', '', '', ''], 'UNK': ['', '', '', '', '', '', '', '', '', '', '', '', '', '']} restypes: List[str] = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'] restype_order: Dict[str, int] = {restype: i for (i, restype) in enumerate(restypes)} restype_num = len(restypes) unk_restype_index = restype_num restypes_with_x: List[str] = restypes + ['X'] restype_order_with_x: Dict[str, int] = {restype: i for (i, restype) in enumerate(restypes_with_x)} def sequence_to_onehot(sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool=False) -> np.ndarray: num_entries = max(mapping.values()) + 1 if sorted(set(mapping.values())) != list(range(num_entries)): raise ValueError('The mapping must have values from 0 to num_unique_aas-1 without any gaps. Got: %s' % sorted(mapping.values())) one_hot_arr = np.zeros((len(sequence), num_entries), dtype=np.int32) for (aa_index, aa_type) in enumerate(sequence): if map_unknown_to_x: if aa_type.isalpha() and aa_type.isupper(): aa_id = mapping.get(aa_type, mapping['X']) else: raise ValueError(f'Invalid character in the sequence: {aa_type}') else: aa_id = mapping[aa_type] one_hot_arr[aa_index, aa_id] = 1 return one_hot_arr restype_1to3: Dict[str, str] = {'A': 'ALA', 'R': 'ARG', 'N': 'ASN', 'D': 'ASP', 'C': 'CYS', 'Q': 'GLN', 'E': 'GLU', 'G': 'GLY', 'H': 'HIS', 'I': 'ILE', 'L': 'LEU', 'K': 'LYS', 'M': 'MET', 'F': 'PHE', 'P': 'PRO', 'S': 'SER', 'T': 'THR', 'W': 'TRP', 'Y': 'TYR', 'V': 'VAL'} restype_3to1: Dict[str, str] = {v: k for (k, v) in restype_1to3.items()} unk_restype = 'UNK' resnames: List[str] = [restype_1to3[r] for r in restypes] + [unk_restype] resname_to_idx: Dict[str, int] = {resname: i for (i, resname) in enumerate(resnames)} HHBLITS_AA_TO_ID: Dict[str, int] = {'A': 0, 'B': 2, 'C': 1, 'D': 2, 'E': 3, 'F': 4, 'G': 5, 'H': 6, 'I': 7, 'J': 20, 'K': 8, 'L': 9, 'M': 10, 'N': 11, 'O': 20, 'P': 12, 'Q': 13, 'R': 14, 'S': 15, 'T': 16, 'U': 1, 'V': 17, 'W': 18, 'X': 20, 'Y': 19, 'Z': 3, '-': 21} ID_TO_HHBLITS_AA: Dict[int, str] = {0: 'A', 1: 'C', 2: 'D', 3: 'E', 4: 'F', 5: 'G', 6: 'H', 7: 'I', 8: 'K', 9: 'L', 10: 'M', 11: 'N', 12: 'P', 13: 'Q', 14: 'R', 15: 'S', 16: 'T', 17: 'V', 18: 'W', 19: 'Y', 20: 'X', 21: '-'} restypes_with_x_and_gap: List[str] = restypes + ['X', '-'] MAP_HHBLITS_AATYPE_TO_OUR_AATYPE: Tuple[int, ...] 
= tuple((restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i]) for i in range(len(restypes_with_x_and_gap)))) def _make_standard_atom_mask() -> np.ndarray: mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32) for (restype, restype_letter) in enumerate(restypes): restype_name = restype_1to3[restype_letter] atom_names = residue_atoms[restype_name] for atom_name in atom_names: atom_type = atom_order[atom_name] mask[restype, atom_type] = 1 return mask STANDARD_ATOM_MASK = _make_standard_atom_mask() def chi_angle_atom(atom_index: int) -> np.ndarray: chi_angles_index = {} one_hots = [] for (k, v) in chi_angles_atoms.items(): indices = [atom_types.index(s[atom_index]) for s in v] indices.extend([-1] * (4 - len(indices))) chi_angles_index[k] = indices for r in restypes: res3 = restype_1to3[r] one_hot = np.eye(atom_type_num)[chi_angles_index[res3]] one_hots.append(one_hot) one_hots.append(np.zeros([4, atom_type_num])) one_hot = np.stack(one_hots, axis=0) one_hot = np.transpose(one_hot, [0, 2, 1]) return one_hot chi_atom_1_one_hot = chi_angle_atom(1) chi_atom_2_one_hot = chi_angle_atom(2) chi_angles_atom_indices_list: List[List[List[str]]] = [chi_angles_atoms[restype_1to3[r]] for r in restypes] chi_angles_atom_indices_ours: list = map_structure_with_atom_order(chi_angles_atom_indices_list) chi_angles_atom_indices = np.array([chi_atoms + [[0, 0, 0, 0]] * (4 - len(chi_atoms)) for chi_atoms in chi_angles_atom_indices_list]) chi_groups_for_atom: Dict[Tuple[str, str], List[Tuple[int, int]]] = collections.defaultdict(list) for (res_name, chi_angle_atoms_for_res) in chi_angles_atoms.items(): for (chi_group_i, chi_group) in enumerate(chi_angle_atoms_for_res): for (atom_i, atom) in enumerate(chi_group): chi_groups_for_atom[res_name, atom].append((chi_group_i, atom_i)) chi_groups_for_atom = dict(chi_groups_for_atom) def _make_rigid_transformation_4x4(ex: np.ndarray, ey: np.ndarray, translation: np.ndarray) -> np.ndarray: ex_normalized = ex / np.linalg.norm(ex) ey_normalized = ey - np.dot(ey, ex_normalized) * ex_normalized ey_normalized /= np.linalg.norm(ey_normalized) eznorm = np.cross(ex_normalized, ey_normalized) m = np.stack([ex_normalized, ey_normalized, eznorm, translation]).transpose() m = np.concatenate([m, [[0.0, 0.0, 0.0, 1.0]]], axis=0) return m restype_atom37_to_rigid_group = np.zeros([21, 37], dtype=int) restype_atom37_mask = np.zeros([21, 37], dtype=np.float32) restype_atom37_rigid_group_positions = np.zeros([21, 37, 3], dtype=np.float32) restype_atom14_to_rigid_group = np.zeros([21, 14], dtype=int) restype_atom14_mask = np.zeros([21, 14], dtype=np.float32) restype_atom14_rigid_group_positions = np.zeros([21, 14, 3], dtype=np.float32) restype_rigid_group_default_frame = np.zeros([21, 8, 4, 4], dtype=np.float32) def _make_rigid_group_constants() -> None: for (restype, restype_letter) in enumerate(restypes): resname = restype_1to3[restype_letter] for (atomname, group_idx, atom_position) in rigid_group_atom_positions[resname]: atomtype = atom_order[atomname] restype_atom37_to_rigid_group[restype, atomtype] = group_idx restype_atom37_mask[restype, atomtype] = 1 restype_atom37_rigid_group_positions[restype, atomtype, :] = atom_position atom14idx = restype_name_to_atom14_names[resname].index(atomname) restype_atom14_to_rigid_group[restype, atom14idx] = group_idx restype_atom14_mask[restype, atom14idx] = 1 restype_atom14_rigid_group_positions[restype, atom14idx, :] = atom_position for (restype, restype_letter) in enumerate(restypes): resname = restype_1to3[restype_letter] atom_positions: 
Dict[str, np.ndarray] = {name: np.array(pos) for (name, _, pos) in rigid_group_atom_positions[resname]} restype_rigid_group_default_frame[restype, 0, :, :] = np.eye(4) restype_rigid_group_default_frame[restype, 1, :, :] = np.eye(4) mat = _make_rigid_transformation_4x4(ex=atom_positions['N'] - atom_positions['CA'], ey=np.array([1.0, 0.0, 0.0]), translation=atom_positions['N']) restype_rigid_group_default_frame[restype, 2, :, :] = mat mat = _make_rigid_transformation_4x4(ex=atom_positions['C'] - atom_positions['CA'], ey=atom_positions['CA'] - atom_positions['N'], translation=atom_positions['C']) restype_rigid_group_default_frame[restype, 3, :, :] = mat if chi_angles_mask[restype][0]: base_atom_names = chi_angles_atoms[resname][0] base_atom_positions = [atom_positions[name] for name in base_atom_names] mat = _make_rigid_transformation_4x4(ex=base_atom_positions[2] - base_atom_positions[1], ey=base_atom_positions[0] - base_atom_positions[1], translation=base_atom_positions[2]) restype_rigid_group_default_frame[restype, 4, :, :] = mat for chi_idx in range(1, 4): if chi_angles_mask[restype][chi_idx]: axis_end_atom_name = chi_angles_atoms[resname][chi_idx][2] axis_end_atom_position = atom_positions[axis_end_atom_name] mat = _make_rigid_transformation_4x4(ex=axis_end_atom_position, ey=np.array([-1.0, 0.0, 0.0]), translation=axis_end_atom_position) restype_rigid_group_default_frame[restype, 4 + chi_idx, :, :] = mat _make_rigid_group_constants() def make_atom14_dists_bounds(overlap_tolerance: float=1.5, bond_length_tolerance_factor: int=15) -> Dict[str, np.ndarray]: restype_atom14_bond_lower_bound = np.zeros([21, 14, 14], np.float32) restype_atom14_bond_upper_bound = np.zeros([21, 14, 14], np.float32) restype_atom14_bond_stddev = np.zeros([21, 14, 14], np.float32) (residue_bonds, residue_virtual_bonds, _) = load_stereo_chemical_props() for (restype, restype_letter) in enumerate(restypes): resname = restype_1to3[restype_letter] atom_list = restype_name_to_atom14_names[resname] for (atom1_idx, atom1_name) in enumerate(atom_list): if not atom1_name: continue atom1_radius = van_der_waals_radius[atom1_name[0]] for (atom2_idx, atom2_name) in enumerate(atom_list): if not atom2_name or atom1_idx == atom2_idx: continue atom2_radius = van_der_waals_radius[atom2_name[0]] lower = atom1_radius + atom2_radius - overlap_tolerance upper = 10000000000.0 restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper for b in residue_bonds[resname] + residue_virtual_bonds[resname]: atom1_idx = atom_list.index(b.atom1_name) atom2_idx = atom_list.index(b.atom2_name) lower = b.length - bond_length_tolerance_factor * b.stddev upper = b.length + bond_length_tolerance_factor * b.stddev restype_atom14_bond_lower_bound[restype, atom1_idx, atom2_idx] = lower restype_atom14_bond_lower_bound[restype, atom2_idx, atom1_idx] = lower restype_atom14_bond_upper_bound[restype, atom1_idx, atom2_idx] = upper restype_atom14_bond_upper_bound[restype, atom2_idx, atom1_idx] = upper restype_atom14_bond_stddev[restype, atom1_idx, atom2_idx] = b.stddev restype_atom14_bond_stddev[restype, atom2_idx, atom1_idx] = b.stddev return {'lower_bound': restype_atom14_bond_lower_bound, 'upper_bound': restype_atom14_bond_upper_bound, 'stddev': restype_atom14_bond_stddev} restype_atom14_ambiguous_atoms = np.zeros((21, 14), dtype=np.float32) 
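# Illustrative sketch (not part of the original file): a minimal, hedged example of how the
# residue lookup tables defined above are typically used. It assumes the module-level names
# `sequence_to_onehot`, `restype_order_with_x`, `restype_1to3`, `STANDARD_ATOM_MASK`,
# `restype_num` and `atom_type_num` (all defined earlier in this module) are in scope; the
# helper name itself is hypothetical and exists only for illustration.
def _residue_constants_demo() -> None:
    seq = "ACDXF"
    # One-hot encode a protein sequence; unknown/rare residues map to the extra 'X' column.
    one_hot = sequence_to_onehot(seq, restype_order_with_x, map_unknown_to_x=True)
    assert one_hot.shape == (len(seq), len(restype_order_with_x))
    # Round-trip between one-letter and three-letter residue names.
    assert restype_1to3["A"] == "ALA"
    # STANDARD_ATOM_MASK marks, per residue type (20 standard + UNK), which of the
    # atom37 atom slots exist for that residue.
    assert STANDARD_ATOM_MASK.shape == (restype_num + 1, atom_type_num)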
restype_atom14_ambiguous_atoms_swap_idx: np.ndarray = np.tile(np.arange(14, dtype=int), (21, 1)) def _make_atom14_ambiguity_feats() -> None: for (res, pairs) in residue_atom_renaming_swaps.items(): res_idx = restype_order[restype_3to1[res]] for (atom1, atom2) in pairs.items(): atom1_idx = restype_name_to_atom14_names[res].index(atom1) atom2_idx = restype_name_to_atom14_names[res].index(atom2) restype_atom14_ambiguous_atoms[res_idx, atom1_idx] = 1 restype_atom14_ambiguous_atoms[res_idx, atom2_idx] = 1 restype_atom14_ambiguous_atoms_swap_idx[res_idx, atom1_idx] = atom2_idx restype_atom14_ambiguous_atoms_swap_idx[res_idx, atom2_idx] = atom1_idx _make_atom14_ambiguity_feats() def aatype_to_str_sequence(aatype: Sequence[int]) -> str: return ''.join([restypes_with_x[aatype[i]] for i in range(len(aatype))]) # File: transformers-main/src/transformers/models/esm/openfold_utils/rigid_utils.py from __future__ import annotations from functools import lru_cache from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple import numpy as np import torch def rot_matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: def row_mul(i: int) -> torch.Tensor: return torch.stack([a[..., i, 0] * b[..., 0, 0] + a[..., i, 1] * b[..., 1, 0] + a[..., i, 2] * b[..., 2, 0], a[..., i, 0] * b[..., 0, 1] + a[..., i, 1] * b[..., 1, 1] + a[..., i, 2] * b[..., 2, 1], a[..., i, 0] * b[..., 0, 2] + a[..., i, 1] * b[..., 1, 2] + a[..., i, 2] * b[..., 2, 2]], dim=-1) return torch.stack([row_mul(0), row_mul(1), row_mul(2)], dim=-2) def rot_vec_mul(r: torch.Tensor, t: torch.Tensor) -> torch.Tensor: (x, y, z) = torch.unbind(t, dim=-1) return torch.stack([r[..., 0, 0] * x + r[..., 0, 1] * y + r[..., 0, 2] * z, r[..., 1, 0] * x + r[..., 1, 1] * y + r[..., 1, 2] * z, r[..., 2, 0] * x + r[..., 2, 1] * y + r[..., 2, 2] * z], dim=-1) @lru_cache(maxsize=None) def identity_rot_mats(batch_dims: Tuple[int, ...], dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, requires_grad: bool=True) -> torch.Tensor: rots = torch.eye(3, dtype=dtype, device=device, requires_grad=requires_grad) rots = rots.view(*(1,) * len(batch_dims), 3, 3) rots = rots.expand(*batch_dims, -1, -1) rots = rots.contiguous() return rots @lru_cache(maxsize=None) def identity_trans(batch_dims: Tuple[int, ...], dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, requires_grad: bool=True) -> torch.Tensor: trans = torch.zeros((*batch_dims, 3), dtype=dtype, device=device, requires_grad=requires_grad) return trans @lru_cache(maxsize=None) def identity_quats(batch_dims: Tuple[int, ...], dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, requires_grad: bool=True) -> torch.Tensor: quat = torch.zeros((*batch_dims, 4), dtype=dtype, device=device, requires_grad=requires_grad) with torch.no_grad(): quat[..., 0] = 1 return quat _quat_elements: List[str] = ['a', 'b', 'c', 'd'] _qtr_keys: List[str] = [l1 + l2 for l1 in _quat_elements for l2 in _quat_elements] _qtr_ind_dict: Dict[str, int] = {key: ind for (ind, key) in enumerate(_qtr_keys)} def _to_mat(pairs: List[Tuple[str, int]]) -> np.ndarray: mat = np.zeros((4, 4)) for (key, value) in pairs: ind = _qtr_ind_dict[key] mat[ind // 4][ind % 4] = value return mat _QTR_MAT = np.zeros((4, 4, 3, 3)) _QTR_MAT[..., 0, 0] = _to_mat([('aa', 1), ('bb', 1), ('cc', -1), ('dd', -1)]) _QTR_MAT[..., 0, 1] = _to_mat([('bc', 2), ('ad', -2)]) _QTR_MAT[..., 0, 2] = _to_mat([('bd', 2), ('ac', 2)]) _QTR_MAT[..., 1, 0] = _to_mat([('bc', 2), ('ad', 2)]) _QTR_MAT[..., 1, 1] = _to_mat([('aa', 
1), ('bb', -1), ('cc', 1), ('dd', -1)]) _QTR_MAT[..., 1, 2] = _to_mat([('cd', 2), ('ab', -2)]) _QTR_MAT[..., 2, 0] = _to_mat([('bd', 2), ('ac', -2)]) _QTR_MAT[..., 2, 1] = _to_mat([('cd', 2), ('ab', 2)]) _QTR_MAT[..., 2, 2] = _to_mat([('aa', 1), ('bb', -1), ('cc', -1), ('dd', 1)]) def quat_to_rot(quat: torch.Tensor) -> torch.Tensor: quat = quat[..., None] * quat[..., None, :] mat = _get_quat('_QTR_MAT', dtype=quat.dtype, device=quat.device) shaped_qtr_mat = mat.view((1,) * len(quat.shape[:-2]) + mat.shape) quat = quat[..., None, None] * shaped_qtr_mat return torch.sum(quat, dim=(-3, -4)) def rot_to_quat(rot: torch.Tensor) -> torch.Tensor: if rot.shape[-2:] != (3, 3): raise ValueError('Input rotation is incorrectly shaped') [[xx, xy, xz], [yx, yy, yz], [zx, zy, zz]] = [[rot[..., i, j] for j in range(3)] for i in range(3)] k = [[xx + yy + zz, zy - yz, xz - zx, yx - xy], [zy - yz, xx - yy - zz, xy + yx, xz + zx], [xz - zx, xy + yx, yy - xx - zz, yz + zy], [yx - xy, xz + zx, yz + zy, zz - xx - yy]] (_, vectors) = torch.linalg.eigh(1.0 / 3.0 * torch.stack([torch.stack(t, dim=-1) for t in k], dim=-2)) return vectors[..., -1] _QUAT_MULTIPLY = np.zeros((4, 4, 4)) _QUAT_MULTIPLY[:, :, 0] = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]] _QUAT_MULTIPLY[:, :, 1] = [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0]] _QUAT_MULTIPLY[:, :, 2] = [[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, 1, 0, 0]] _QUAT_MULTIPLY[:, :, 3] = [[0, 0, 0, 1], [0, 0, 1, 0], [0, -1, 0, 0], [1, 0, 0, 0]] _QUAT_MULTIPLY_BY_VEC = _QUAT_MULTIPLY[:, 1:, :] _CACHED_QUATS: Dict[str, np.ndarray] = {'_QTR_MAT': _QTR_MAT, '_QUAT_MULTIPLY': _QUAT_MULTIPLY, '_QUAT_MULTIPLY_BY_VEC': _QUAT_MULTIPLY_BY_VEC} @lru_cache(maxsize=None) def _get_quat(quat_key: str, dtype: torch.dtype, device: torch.device) -> torch.Tensor: return torch.tensor(_CACHED_QUATS[quat_key], dtype=dtype, device=device) def quat_multiply(quat1: torch.Tensor, quat2: torch.Tensor) -> torch.Tensor: mat = _get_quat('_QUAT_MULTIPLY', dtype=quat1.dtype, device=quat1.device) reshaped_mat = mat.view((1,) * len(quat1.shape[:-1]) + mat.shape) return torch.sum(reshaped_mat * quat1[..., :, None, None] * quat2[..., None, :, None], dim=(-3, -2)) def quat_multiply_by_vec(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor: mat = _get_quat('_QUAT_MULTIPLY_BY_VEC', dtype=quat.dtype, device=quat.device) reshaped_mat = mat.view((1,) * len(quat.shape[:-1]) + mat.shape) return torch.sum(reshaped_mat * quat[..., :, None, None] * vec[..., None, :, None], dim=(-3, -2)) def invert_rot_mat(rot_mat: torch.Tensor) -> torch.Tensor: return rot_mat.transpose(-1, -2) def invert_quat(quat: torch.Tensor) -> torch.Tensor: quat_prime = quat.clone() quat_prime[..., 1:] *= -1 inv = quat_prime / torch.sum(quat ** 2, dim=-1, keepdim=True) return inv class Rotation: def __init__(self, rot_mats: Optional[torch.Tensor]=None, quats: Optional[torch.Tensor]=None, normalize_quats: bool=True): if rot_mats is None and quats is None or (rot_mats is not None and quats is not None): raise ValueError('Exactly one input argument must be specified') if rot_mats is not None and rot_mats.shape[-2:] != (3, 3) or (quats is not None and quats.shape[-1] != 4): raise ValueError('Incorrectly shaped rotation matrix or quaternion') if quats is not None: quats = quats.to(dtype=torch.float32) if rot_mats is not None: rot_mats = rot_mats.to(dtype=torch.float32) if quats is not None and normalize_quats: quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True) self._rot_mats = rot_mats self._quats = quats 
@staticmethod def identity(shape, dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, requires_grad: bool=True, fmt: str='quat') -> Rotation: if fmt == 'rot_mat': rot_mats = identity_rot_mats(shape, dtype, device, requires_grad) return Rotation(rot_mats=rot_mats, quats=None) elif fmt == 'quat': quats = identity_quats(shape, dtype, device, requires_grad) return Rotation(rot_mats=None, quats=quats, normalize_quats=False) else: raise ValueError(f'Invalid format: f{fmt}') def __getitem__(self, index: Any) -> Rotation: if type(index) is not tuple: index = (index,) if self._rot_mats is not None: rot_mats = self._rot_mats[index + (slice(None), slice(None))] return Rotation(rot_mats=rot_mats) elif self._quats is not None: quats = self._quats[index + (slice(None),)] return Rotation(quats=quats, normalize_quats=False) else: raise ValueError('Both rotations are None') def __mul__(self, right: torch.Tensor) -> Rotation: if not isinstance(right, torch.Tensor): raise TypeError('The other multiplicand must be a Tensor') if self._rot_mats is not None: rot_mats = self._rot_mats * right[..., None, None] return Rotation(rot_mats=rot_mats, quats=None) elif self._quats is not None: quats = self._quats * right[..., None] return Rotation(rot_mats=None, quats=quats, normalize_quats=False) else: raise ValueError('Both rotations are None') def __rmul__(self, left: torch.Tensor) -> Rotation: return self.__mul__(left) @property def shape(self) -> torch.Size: if self._rot_mats is not None: return self._rot_mats.shape[:-2] elif self._quats is not None: return self._quats.shape[:-1] else: raise ValueError('Both rotations are None') @property def dtype(self) -> torch.dtype: if self._rot_mats is not None: return self._rot_mats.dtype elif self._quats is not None: return self._quats.dtype else: raise ValueError('Both rotations are None') @property def device(self) -> torch.device: if self._rot_mats is not None: return self._rot_mats.device elif self._quats is not None: return self._quats.device else: raise ValueError('Both rotations are None') @property def requires_grad(self) -> bool: if self._rot_mats is not None: return self._rot_mats.requires_grad elif self._quats is not None: return self._quats.requires_grad else: raise ValueError('Both rotations are None') def get_rot_mats(self) -> torch.Tensor: if self._rot_mats is not None: return self._rot_mats elif self._quats is not None: return quat_to_rot(self._quats) else: raise ValueError('Both rotations are None') def get_quats(self) -> torch.Tensor: if self._rot_mats is not None: return rot_to_quat(self._rot_mats) elif self._quats is not None: return self._quats else: raise ValueError('Both rotations are None') def get_cur_rot(self) -> torch.Tensor: if self._rot_mats is not None: return self._rot_mats elif self._quats is not None: return self._quats else: raise ValueError('Both rotations are None') def compose_q_update_vec(self, q_update_vec: torch.Tensor, normalize_quats: bool=True) -> Rotation: quats = self.get_quats() new_quats = quats + quat_multiply_by_vec(quats, q_update_vec) return Rotation(rot_mats=None, quats=new_quats, normalize_quats=normalize_quats) def compose_r(self, r: Rotation) -> Rotation: r1 = self.get_rot_mats() r2 = r.get_rot_mats() new_rot_mats = rot_matmul(r1, r2) return Rotation(rot_mats=new_rot_mats, quats=None) def compose_q(self, r: Rotation, normalize_quats: bool=True) -> Rotation: q1 = self.get_quats() q2 = r.get_quats() new_quats = quat_multiply(q1, q2) return Rotation(rot_mats=None, quats=new_quats, 
normalize_quats=normalize_quats) def apply(self, pts: torch.Tensor) -> torch.Tensor: rot_mats = self.get_rot_mats() return rot_vec_mul(rot_mats, pts) def invert_apply(self, pts: torch.Tensor) -> torch.Tensor: rot_mats = self.get_rot_mats() inv_rot_mats = invert_rot_mat(rot_mats) return rot_vec_mul(inv_rot_mats, pts) def invert(self) -> Rotation: if self._rot_mats is not None: return Rotation(rot_mats=invert_rot_mat(self._rot_mats), quats=None) elif self._quats is not None: return Rotation(rot_mats=None, quats=invert_quat(self._quats), normalize_quats=False) else: raise ValueError('Both rotations are None') def unsqueeze(self, dim: int) -> Rotation: if dim >= len(self.shape): raise ValueError('Invalid dimension') if self._rot_mats is not None: rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2) return Rotation(rot_mats=rot_mats, quats=None) elif self._quats is not None: quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1) return Rotation(rot_mats=None, quats=quats, normalize_quats=False) else: raise ValueError('Both rotations are None') @staticmethod def cat(rs: Sequence[Rotation], dim: int) -> Rotation: rot_mats = torch.cat([r.get_rot_mats() for r in rs], dim=dim if dim >= 0 else dim - 2) return Rotation(rot_mats=rot_mats, quats=None) def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rotation: if self._rot_mats is not None: rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,)) rot_mats = torch.stack(list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1) rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3)) return Rotation(rot_mats=rot_mats, quats=None) elif self._quats is not None: quats = torch.stack(list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1) return Rotation(rot_mats=None, quats=quats, normalize_quats=False) else: raise ValueError('Both rotations are None') def cuda(self) -> Rotation: if self._rot_mats is not None: return Rotation(rot_mats=self._rot_mats.cuda(), quats=None) elif self._quats is not None: return Rotation(rot_mats=None, quats=self._quats.cuda(), normalize_quats=False) else: raise ValueError('Both rotations are None') def to(self, device: Optional[torch.device], dtype: Optional[torch.dtype]) -> Rotation: if self._rot_mats is not None: return Rotation(rot_mats=self._rot_mats.to(device=device, dtype=dtype), quats=None) elif self._quats is not None: return Rotation(rot_mats=None, quats=self._quats.to(device=device, dtype=dtype), normalize_quats=False) else: raise ValueError('Both rotations are None') def detach(self) -> Rotation: if self._rot_mats is not None: return Rotation(rot_mats=self._rot_mats.detach(), quats=None) elif self._quats is not None: return Rotation(rot_mats=None, quats=self._quats.detach(), normalize_quats=False) else: raise ValueError('Both rotations are None') class Rigid: def __init__(self, rots: Optional[Rotation], trans: Optional[torch.Tensor]): (batch_dims, dtype, device, requires_grad) = (None, None, None, None) if trans is not None: batch_dims = trans.shape[:-1] dtype = trans.dtype device = trans.device requires_grad = trans.requires_grad elif rots is not None: batch_dims = rots.shape dtype = rots.dtype device = rots.device requires_grad = rots.requires_grad else: raise ValueError('At least one input argument must be specified') if rots is None: rots = Rotation.identity(batch_dims, dtype, device, requires_grad) elif trans is None: trans = identity_trans(batch_dims, dtype, device, requires_grad) assert rots is not None assert trans is not None if rots.shape != trans.shape[:-1] or 
rots.device != trans.device: raise ValueError('Rots and trans incompatible') trans = trans.to(dtype=torch.float32) self._rots = rots self._trans = trans @staticmethod def identity(shape: Tuple[int, ...], dtype: Optional[torch.dtype]=None, device: Optional[torch.device]=None, requires_grad: bool=True, fmt: str='quat') -> Rigid: return Rigid(Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt), identity_trans(shape, dtype, device, requires_grad)) def __getitem__(self, index: Any) -> Rigid: if type(index) is not tuple: index = (index,) return Rigid(self._rots[index], self._trans[index + (slice(None),)]) def __mul__(self, right: torch.Tensor) -> Rigid: if not isinstance(right, torch.Tensor): raise TypeError('The other multiplicand must be a Tensor') new_rots = self._rots * right new_trans = self._trans * right[..., None] return Rigid(new_rots, new_trans) def __rmul__(self, left: torch.Tensor) -> Rigid: return self.__mul__(left) @property def shape(self) -> torch.Size: return self._trans.shape[:-1] @property def device(self) -> torch.device: return self._trans.device def get_rots(self) -> Rotation: return self._rots def get_trans(self) -> torch.Tensor: return self._trans def compose_q_update_vec(self, q_update_vec: torch.Tensor) -> Rigid: (q_vec, t_vec) = (q_update_vec[..., :3], q_update_vec[..., 3:]) new_rots = self._rots.compose_q_update_vec(q_vec) trans_update = self._rots.apply(t_vec) new_translation = self._trans + trans_update return Rigid(new_rots, new_translation) def compose(self, r: Rigid) -> Rigid: new_rot = self._rots.compose_r(r._rots) new_trans = self._rots.apply(r._trans) + self._trans return Rigid(new_rot, new_trans) def apply(self, pts: torch.Tensor) -> torch.Tensor: rotated = self._rots.apply(pts) return rotated + self._trans def invert_apply(self, pts: torch.Tensor) -> torch.Tensor: pts = pts - self._trans return self._rots.invert_apply(pts) def invert(self) -> Rigid: rot_inv = self._rots.invert() trn_inv = rot_inv.apply(self._trans) return Rigid(rot_inv, -1 * trn_inv) def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid: new_rots = self._rots.map_tensor_fn(fn) new_trans = torch.stack(list(map(fn, torch.unbind(self._trans, dim=-1))), dim=-1) return Rigid(new_rots, new_trans) def to_tensor_4x4(self) -> torch.Tensor: tensor = self._trans.new_zeros((*self.shape, 4, 4)) tensor[..., :3, :3] = self._rots.get_rot_mats() tensor[..., :3, 3] = self._trans tensor[..., 3, 3] = 1 return tensor @staticmethod def from_tensor_4x4(t: torch.Tensor) -> Rigid: if t.shape[-2:] != (4, 4): raise ValueError('Incorrectly shaped input tensor') rots = Rotation(rot_mats=t[..., :3, :3], quats=None) trans = t[..., :3, 3] return Rigid(rots, trans) def to_tensor_7(self) -> torch.Tensor: tensor = self._trans.new_zeros((*self.shape, 7)) tensor[..., :4] = self._rots.get_quats() tensor[..., 4:] = self._trans return tensor @staticmethod def from_tensor_7(t: torch.Tensor, normalize_quats: bool=False) -> Rigid: if t.shape[-1] != 7: raise ValueError('Incorrectly shaped input tensor') (quats, trans) = (t[..., :4], t[..., 4:]) rots = Rotation(rot_mats=None, quats=quats, normalize_quats=normalize_quats) return Rigid(rots, trans) @staticmethod def from_3_points(p_neg_x_axis: torch.Tensor, origin: torch.Tensor, p_xy_plane: torch.Tensor, eps: float=1e-08) -> Rigid: p_neg_x_axis_unbound = torch.unbind(p_neg_x_axis, dim=-1) origin_unbound = torch.unbind(origin, dim=-1) p_xy_plane_unbound = torch.unbind(p_xy_plane, dim=-1) e0 = [c1 - c2 for (c1, c2) in zip(origin_unbound, 
p_neg_x_axis_unbound)] e1 = [c1 - c2 for (c1, c2) in zip(p_xy_plane_unbound, origin_unbound)] denom = torch.sqrt(sum((c * c for c in e0)) + eps * torch.ones_like(e0[0])) e0 = [c / denom for c in e0] dot = sum((c1 * c2 for (c1, c2) in zip(e0, e1))) e1 = [c2 - c1 * dot for (c1, c2) in zip(e0, e1)] denom = torch.sqrt(sum((c * c for c in e1)) + eps * torch.ones_like(e1[0])) e1 = [c / denom for c in e1] e2 = [e0[1] * e1[2] - e0[2] * e1[1], e0[2] * e1[0] - e0[0] * e1[2], e0[0] * e1[1] - e0[1] * e1[0]] rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1) rots = rots.reshape(rots.shape[:-1] + (3, 3)) rot_obj = Rotation(rot_mats=rots, quats=None) return Rigid(rot_obj, torch.stack(origin_unbound, dim=-1)) def unsqueeze(self, dim: int) -> Rigid: if dim >= len(self.shape): raise ValueError('Invalid dimension') rots = self._rots.unsqueeze(dim) trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1) return Rigid(rots, trans) @staticmethod def cat(ts: Sequence[Rigid], dim: int) -> Rigid: rots = Rotation.cat([t._rots for t in ts], dim) trans = torch.cat([t._trans for t in ts], dim=dim if dim >= 0 else dim - 1) return Rigid(rots, trans) def apply_rot_fn(self, fn: Callable[[Rotation], Rotation]) -> Rigid: return Rigid(fn(self._rots), self._trans) def apply_trans_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid: return Rigid(self._rots, fn(self._trans)) def scale_translation(self, trans_scale_factor: float) -> Rigid: return self.apply_trans_fn(lambda t: t * trans_scale_factor) def stop_rot_gradient(self) -> Rigid: return self.apply_rot_fn(lambda r: r.detach()) @staticmethod def make_transform_from_reference(n_xyz: torch.Tensor, ca_xyz: torch.Tensor, c_xyz: torch.Tensor, eps: float=1e-20) -> Rigid: translation = -1 * ca_xyz n_xyz = n_xyz + translation c_xyz = c_xyz + translation (c_x, c_y, c_z) = [c_xyz[..., i] for i in range(3)] norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2) sin_c1 = -c_y / norm cos_c1 = c_x / norm c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3)) c1_rots[..., 0, 0] = cos_c1 c1_rots[..., 0, 1] = -1 * sin_c1 c1_rots[..., 1, 0] = sin_c1 c1_rots[..., 1, 1] = cos_c1 c1_rots[..., 2, 2] = 1 norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2) sin_c2 = c_z / norm cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3)) c2_rots[..., 0, 0] = cos_c2 c2_rots[..., 0, 2] = sin_c2 c2_rots[..., 1, 1] = 1 c2_rots[..., 2, 0] = -1 * sin_c2 c2_rots[..., 2, 2] = cos_c2 c_rots = rot_matmul(c2_rots, c1_rots) n_xyz = rot_vec_mul(c_rots, n_xyz) (_, n_y, n_z) = [n_xyz[..., i] for i in range(3)] norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2) sin_n = -n_z / norm cos_n = n_y / norm n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3)) n_rots[..., 0, 0] = 1 n_rots[..., 1, 1] = cos_n n_rots[..., 1, 2] = -1 * sin_n n_rots[..., 2, 1] = sin_n n_rots[..., 2, 2] = cos_n rots = rot_matmul(n_rots, c_rots) rots = rots.transpose(-1, -2) translation = -1 * translation rot_obj = Rotation(rot_mats=rots, quats=None) return Rigid(rot_obj, translation) def cuda(self) -> Rigid: return Rigid(self._rots.cuda(), self._trans.cuda()) # File: transformers-main/src/transformers/models/esm/openfold_utils/tensor_utils.py from functools import partial from typing import Any, Callable, Dict, List, Type, TypeVar, Union, overload import torch import torch.nn as nn import torch.types def add(m1: torch.Tensor, m2: torch.Tensor, inplace: bool) -> torch.Tensor: if not inplace: m1 = m1 + m2 else: m1 += m2 return m1 def permute_final_dims(tensor: torch.Tensor, inds: List[int]) -> 
torch.Tensor: zero_index = -1 * len(inds) first_inds = list(range(len(tensor.shape[:zero_index]))) return tensor.permute(first_inds + [zero_index + i for i in inds]) def flatten_final_dims(t: torch.Tensor, no_dims: int) -> torch.Tensor: return t.reshape(t.shape[:-no_dims] + (-1,)) def masked_mean(mask: torch.Tensor, value: torch.Tensor, dim: int, eps: float=0.0001) -> torch.Tensor: mask = mask.expand(*value.shape) return torch.sum(mask * value, dim=dim) / (eps + torch.sum(mask, dim=dim)) def pts_to_distogram(pts: torch.Tensor, min_bin: torch.types.Number=2.3125, max_bin: torch.types.Number=21.6875, no_bins: int=64) -> torch.Tensor: boundaries = torch.linspace(min_bin, max_bin, no_bins - 1, device=pts.device) dists = torch.sqrt(torch.sum((pts.unsqueeze(-2) - pts.unsqueeze(-3)) ** 2, dim=-1)) return torch.bucketize(dists, boundaries) def dict_multimap(fn: Callable[[list], Any], dicts: List[dict]) -> dict: first = dicts[0] new_dict = {} for (k, v) in first.items(): all_v = [d[k] for d in dicts] if isinstance(v, dict): new_dict[k] = dict_multimap(fn, all_v) else: new_dict[k] = fn(all_v) return new_dict def one_hot(x: torch.Tensor, v_bins: torch.Tensor) -> torch.Tensor: reshaped_bins = v_bins.view((1,) * len(x.shape) + (len(v_bins),)) diffs = x[..., None] - reshaped_bins am = torch.argmin(torch.abs(diffs), dim=-1) return nn.functional.one_hot(am, num_classes=len(v_bins)).float() def batched_gather(data: torch.Tensor, inds: torch.Tensor, dim: int=0, no_batch_dims: int=0) -> torch.Tensor: ranges: List[Union[slice, torch.Tensor]] = [] for (i, s) in enumerate(data.shape[:no_batch_dims]): r = torch.arange(s) r = r.view(*(*(1,) * i, -1, *(1,) * (len(inds.shape) - i - 1))) ranges.append(r) remaining_dims: List[Union[slice, torch.Tensor]] = [slice(None) for _ in range(len(data.shape) - no_batch_dims)] remaining_dims[dim - no_batch_dims if dim >= 0 else dim] = inds ranges.extend(remaining_dims) return data[tuple(ranges)] T = TypeVar('T') def dict_map(fn: Callable[[T], Any], dic: Dict[Any, Union[dict, list, tuple, T]], leaf_type: Type[T]) -> Dict[Any, Union[dict, list, tuple, Any]]: new_dict: Dict[Any, Union[dict, list, tuple, Any]] = {} for (k, v) in dic.items(): if isinstance(v, dict): new_dict[k] = dict_map(fn, v, leaf_type) else: new_dict[k] = tree_map(fn, v, leaf_type) return new_dict @overload def tree_map(fn: Callable[[T], Any], tree: T, leaf_type: Type[T]) -> Any: ... @overload def tree_map(fn: Callable[[T], Any], tree: dict, leaf_type: Type[T]) -> dict: ... @overload def tree_map(fn: Callable[[T], Any], tree: list, leaf_type: Type[T]) -> list: ... @overload def tree_map(fn: Callable[[T], Any], tree: tuple, leaf_type: Type[T]) -> tuple: ... 
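# Illustrative sketch (not part of the original file): `tree_map` (implemented just below)
# recursively applies `fn` to every leaf of type `leaf_type` inside nested dicts, lists and
# tuples, and `tensor_tree_map` specializes it to `torch.Tensor` leaves. The example assumes
# those two module-level names are in scope when it is called; the helper name is hypothetical
# and exists only for illustration.
def _tree_map_demo() -> None:
    batch = {
        "aatype": torch.zeros(2, 7),
        "pair": (torch.ones(2, 7, 7), torch.ones(2, 7, 7)),
    }
    # Cast every tensor leaf to float16 while preserving the nested container structure.
    half = tensor_tree_map(lambda t: t.to(torch.float16), batch)
    assert half["aatype"].dtype == torch.float16
    assert isinstance(half["pair"], tuple) and half["pair"][0].dtype == torch.float16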
def tree_map(fn, tree, leaf_type): if isinstance(tree, dict): return dict_map(fn, tree, leaf_type) elif isinstance(tree, list): return [tree_map(fn, x, leaf_type) for x in tree] elif isinstance(tree, tuple): return tuple((tree_map(fn, x, leaf_type) for x in tree)) elif isinstance(tree, leaf_type): return fn(tree) else: print(type(tree)) raise TypeError('Not supported') tensor_tree_map = partial(tree_map, leaf_type=torch.Tensor) # File: transformers-main/src/transformers/models/esm/tokenization_esm.py """""" import os from typing import List, Optional from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} def load_vocab_file(vocab_file): with open(vocab_file, 'r') as f: lines = f.read().splitlines() return [l.strip() for l in lines] class EsmTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, unk_token='', cls_token='', pad_token='', mask_token='', eos_token='', **kwargs): self.all_tokens = load_vocab_file(vocab_file) self._id_to_token = dict(enumerate(self.all_tokens)) self._token_to_id = {tok: ind for (ind, tok) in enumerate(self.all_tokens)} super().__init__(unk_token=unk_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, eos_token=eos_token, **kwargs) self.unique_no_split_tokens = self.all_tokens self._update_trie(self.unique_no_split_tokens) def _convert_id_to_token(self, index: int) -> str: return self._id_to_token.get(index, self.unk_token) def _convert_token_to_id(self, token: str) -> int: return self._token_to_id.get(token, self._token_to_id.get(self.unk_token)) def _tokenize(self, text, **kwargs): return text.split() def get_vocab(self): base_vocab = self._token_to_id.copy() base_vocab.update(self.added_tokens_encoder) return base_vocab def token_to_id(self, token: str) -> int: return self._token_to_id.get(token, self._token_to_id.get(self.unk_token)) def id_to_token(self, index: int) -> str: return self._id_to_token.get(index, self.unk_token) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: cls = [self.cls_token_id] sep = [self.eos_token_id] if token_ids_1 is None: if self.eos_token_id is None: return cls + token_ids_0 else: return cls + token_ids_0 + sep elif self.eos_token_id is None: raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!') return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: if token_ids_1 is not None: raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.') return [1 if token in self.all_special_ids else 0 for token in token_ids_0] mask = [1] + [0] * len(token_ids_0) + [1] if token_ids_1 is not None: mask += [0] * len(token_ids_1) + [1] return mask def save_vocabulary(self, save_directory, filename_prefix): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt') with open(vocab_file, 'w') as f: f.write('\n'.join(self.all_tokens)) return (vocab_file,) @property def vocab_size(self) -> int: return len(self.all_tokens) # File: transformers-main/src/transformers/models/falcon/__init__.py from typing import TYPE_CHECKING from 
...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_falcon': ['FalconConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_falcon'] = ['FalconForCausalLM', 'FalconModel', 'FalconPreTrainedModel', 'FalconForSequenceClassification', 'FalconForTokenClassification', 'FalconForQuestionAnswering'] if TYPE_CHECKING: from .configuration_falcon import FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/falcon/configuration_falcon.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class FalconConfig(PretrainedConfig): model_type = 'falcon' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, num_ln_in_parallel_attn=None, layer_norm_epsilon=1e-05, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, max_position_embeddings=2048, rope_theta=10000.0, rope_scaling=None, bos_token_id=11, eos_token_id=11, ffn_hidden_size=None, activation='gelu', **kwargs): self.vocab_size = vocab_size n_embed = kwargs.pop('n_embed', None) self.hidden_size = hidden_size if n_embed is None else n_embed self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads self.alibi = alibi self.new_decoder_architecture = new_decoder_architecture self.multi_query = multi_query self.parallel_attn = parallel_attn self.bias = bias self.num_ln_in_parallel_attn = num_ln_in_parallel_attn self.max_position_embeddings = max_position_embeddings self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.activation = activation if ffn_hidden_size is None: self.ffn_hidden_size = hidden_size * 4 else: self.ffn_hidden_size = ffn_hidden_size super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) @property def head_dim(self): return self.hidden_size // self.num_attention_heads @property def rotary(self): return not self.alibi # File: transformers-main/src/transformers/models/falcon/convert_custom_code_checkpoint.py import json from argparse import ArgumentParser from pathlib import Path '' if __name__ == '__main__': parser = ArgumentParser() parser.add_argument('--checkpoint_dir', type=Path, required=True, help='Directory containing a custom code checkpoint to convert to a modern Falcon checkpoint.') args = parser.parse_args() if not args.checkpoint_dir.is_dir(): raise ValueError('--checkpoint_dir argument should be a directory!') if not 
(args.checkpoint_dir / 'configuration_RW.py').is_file() or not (args.checkpoint_dir / 'modelling_RW.py').is_file(): raise ValueError('The model directory should contain configuration_RW.py and modelling_RW.py files! Are you sure this is a custom code checkpoint?') (args.checkpoint_dir / 'configuration_RW.py').unlink() (args.checkpoint_dir / 'modelling_RW.py').unlink() config = args.checkpoint_dir / 'config.json' text = config.read_text() text = text.replace('RWForCausalLM', 'FalconForCausalLM') text = text.replace('RefinedWebModel', 'falcon') text = text.replace('RefinedWeb', 'falcon') json_config = json.loads(text) del json_config['auto_map'] if 'n_head' in json_config: json_config['num_attention_heads'] = json_config.pop('n_head') if 'n_layer' in json_config: json_config['num_hidden_layers'] = json_config.pop('n_layer') if 'n_head_kv' in json_config: json_config['num_kv_heads'] = json_config.pop('n_head_kv') json_config['new_decoder_architecture'] = True else: json_config['new_decoder_architecture'] = False bos_token_id = json_config.get('bos_token_id', 1) eos_token_id = json_config.get('eos_token_id', 2) config.unlink() config.write_text(json.dumps(json_config, indent=2, sort_keys=True)) tokenizer_config = args.checkpoint_dir / 'tokenizer_config.json' if tokenizer_config.is_file(): text = tokenizer_config.read_text() json_config = json.loads(text) if json_config['tokenizer_class'] == 'PreTrainedTokenizerFast': json_config['model_input_names'] = ['input_ids', 'attention_mask'] tokenizer_config.unlink() tokenizer_config.write_text(json.dumps(json_config, indent=2, sort_keys=True)) generation_config_path = args.checkpoint_dir / 'generation_config.json' generation_dict = {'_from_model_config': True, 'bos_token_id': bos_token_id, 'eos_token_id': eos_token_id, 'transformers_version': '4.33.0.dev0'} generation_config_path.write_text(json.dumps(generation_dict, indent=2, sort_keys=True)) print('Done! 
Please double-check that the new checkpoint works as expected.') # File: transformers-main/src/transformers/models/falcon/modeling_falcon.py """""" import math from typing import TYPE_CHECKING, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss from torch.nn import functional as F from ...activations import get_activation from ...cache_utils import Cache, DynamicCache, StaticCache from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS from ...modeling_utils import PreTrainedModel from ...pytorch_utils import is_torch_greater_or_equal_than_2_0 from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging from .configuration_falcon import FalconConfig if TYPE_CHECKING: from ...configuration_utils import PretrainedConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'Rocketknight1/falcon-rw-1b' _CONFIG_FOR_DOC = 'FalconConfig' def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask class FalconLinear(nn.Linear): def forward(self, input: torch.Tensor) -> torch.Tensor: hidden_states = input @ self.weight.T if self.bias is None: return hidden_states return hidden_states + self.bias def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = q * cos + rotate_half(q) * sin k_embed = k * cos + rotate_half(k) * sin return (q_embed, k_embed) class FalconRotaryEmbedding(nn.Module): def __init__(self, dim=None, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, rope_type='default', config: Optional[FalconConfig]=None): super().__init__() self.rope_kwargs = {} if config is None: logger.warning_once('`FalconRotaryEmbedding` can now be fully parameterized by passing the model config through the `config` argument. 
All other arguments will be removed in v4.46') self.rope_kwargs = {'rope_type': rope_type, 'factor': scaling_factor, 'dim': dim, 'base': base, 'max_position_embeddings': max_position_embeddings} self.rope_type = rope_type self.max_seq_len_cached = max_position_embeddings self.original_max_seq_len = max_position_embeddings else: if config.rope_scaling is not None: self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type')) else: self.rope_type = 'default' self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] (inv_freq, self.attention_scaling) = self.rope_init_fn(self.config, device, **self.rope_kwargs) self.register_buffer('inv_freq', inv_freq, persistent=False) self.original_inv_freq = self.inv_freq def _dynamic_frequency_update(self, position_ids, device): seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: (inv_freq, self.attention_scaling) = self.rope_init_fn(self.config, device, seq_len=seq_len, **self.rope_kwargs) self.register_buffer('inv_freq', inv_freq, persistent=False) self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: self.register_buffer('inv_freq', self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, position_ids): if 'dynamic' in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() cos = cos * self.attention_scaling sin = sin * self.attention_scaling return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)) class FalconLinearScalingRotaryEmbedding(FalconRotaryEmbedding): def __init__(self, *args, **kwargs): logger.warning_once('`FalconLinearScalingRotaryEmbedding` is deprecated an will be removed in v4.46. Please use `FalconRotaryEmbedding`, which now also does linear scaling (simply pass the model config to __init__).') kwargs['rope_type'] = 'linear' super().__init__(*args, **kwargs) class FalconDynamicNTKScalingRotaryEmbedding(FalconRotaryEmbedding): def __init__(self, *args, **kwargs): logger.warning_once('`FalconDynamicNTKScalingRotaryEmbedding` is deprecated an will be removed in v4.46. 
Please use `FalconRotaryEmbedding`, which now also does dynamic ntk scaling (simply pass the model config to __init__).') kwargs['rope_type'] = 'dynamic' super().__init__(*args, **kwargs) def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: (batch_size, seq_length) = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor(2 ** (-2 ** (-(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32) powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor(2 ** (-2 ** (-(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] alibi = slopes[..., None].bfloat16() * arange_tensor return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype) def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor: out = F.dropout(x, p=prob, training=training) out = residual + out return out class FalconAttention(nn.Module): def __init__(self, config: FalconConfig, layer_idx=None): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.split_size = self.hidden_size self.hidden_dropout = config.hidden_dropout self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self._use_sdpa = config._attn_implementation == 'sdpa' self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. 
Please make sure to provide a `layer_idx` when creating this class.') if self.head_dim * self.num_heads != self.hidden_size: raise ValueError(f'`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim) self.beta = self.inv_norm_factor if config.new_decoder_architecture: qkv_out_dim = (config.num_kv_heads * 2 + config.num_attention_heads) * self.head_dim elif config.multi_query: qkv_out_dim = self.hidden_size + 2 * self.head_dim else: qkv_out_dim = 3 * self.hidden_size self.query_key_value = FalconLinear(self.hidden_size, qkv_out_dim, bias=config.bias) self.new_decoder_architecture = config.new_decoder_architecture self.multi_query = config.multi_query self.dense = FalconLinear(self.hidden_size, self.hidden_size, bias=config.bias) self.attention_dropout = nn.Dropout(config.attention_dropout) self.num_kv_heads = config.num_kv_heads if self.new_decoder_architecture or not self.multi_query else 1 if config.rotary: self.rotary_emb = FalconRotaryEmbedding(config=self.config) def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: if self.new_decoder_architecture: (batch, seq_len, _) = fused_qkv.shape qkv = fused_qkv.view(batch, seq_len, -1, self.num_heads // self.num_kv_heads + 2, self.head_dim) query = qkv[:, :, :, :-2] key = qkv[:, :, :, [-2]] value = qkv[:, :, :, [-1]] key = torch.broadcast_to(key, query.shape) value = torch.broadcast_to(value, query.shape) (query, key, value) = [x.flatten(2, 3) for x in (query, key, value)] return (query, key, value) elif not self.multi_query: (batch_size, seq_length, three_times_hidden_size) = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim) return (fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]) else: (batch_size, seq_length, three_times_hidden_size) = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads + 2, self.head_dim) return (fused_qkv[..., :-2, :], fused_qkv[..., [-2], :], fused_qkv[..., [-1], :]) def _merge_heads(self, x: torch.Tensor) -> torch.Tensor: (batch_size_and_num_heads, seq_length, _) = x.shape batch_size = batch_size_and_num_heads // self.num_heads x = x.view(batch_size, self.num_heads, seq_length, self.head_dim) x = x.permute(0, 2, 1, 3) return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim) def forward(self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor]=None, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None): fused_qkv = self.query_key_value(hidden_states) num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv) (batch_size, query_length, _, _) = query_layer.shape query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim) key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) if alibi is None: if position_embeddings is None: logger.warning_once('The attention layers in this model are 
transitioning from computing the RoPE embeddings internally through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed `position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be removed and `position_embeddings` will be mandatory.') (cos, sin) = self.rotary_emb(value_layer, position_ids) else: (cos, sin) = position_embeddings (query_layer, key_layer) = apply_rotary_pos_emb(query_layer, key_layer, cos, sin) if layer_past is not None: cache_kwargs = {'cache_position': cache_position} if alibi is None: cache_kwargs.update({'sin': sin, 'cos': cos}) (key_layer, value_layer) = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs) kv_length = key_layer.shape[-2] if self._use_sdpa and query_layer.device.type == 'cuda' and (attention_mask is not None): query_layer = query_layer.contiguous() key_layer = key_layer.contiguous() value_layer = value_layer.contiguous() if attention_mask is not None: attention_mask = attention_mask[:, :, :, :key_layer.shape[-2]] if alibi is None: if self._use_sdpa and (not output_attentions): is_causal = True if self.is_causal and attention_mask is None and (query_length > 1) else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=0.0, is_causal=is_causal) attention_scores = None else: attention_scores = query_layer @ key_layer.transpose(-1, -2) attention_scores /= math.sqrt(self.head_dim) attention_scores = F.softmax(attention_scores + attention_mask, dim=-1, dtype=hidden_states.dtype) attn_output = attention_scores @ value_layer attn_output = attn_output.view(batch_size, self.num_heads, query_length, self.head_dim) attn_output = attn_output.permute(0, 2, 1, 3) attn_output = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) attn_output = self.dense(attn_output) if output_attentions: return (attn_output, layer_past, attention_scores) else: return (attn_output, layer_past) else: if self._use_sdpa and (not output_attentions) and (head_mask is None): is_causal = True if self.is_causal and attention_mask is None and (query_length > 1) else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=self.attention_dropout.p if self.training else 0.0, is_causal=is_causal) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) attn_output = self.dense(attn_output) else: matmul_result = query_layer @ key_layer.transpose(-1, -2) attention_scores = matmul_result.view(batch_size, self.num_heads, query_length, kv_length) input_dtype = attention_scores.dtype if input_dtype == torch.float16 or input_dtype == torch.bfloat16: attention_scores = attention_scores.to(torch.float32) attention_logits = attention_scores + alibi.view(batch_size, self.num_heads, 1, -1) attention_logits *= self.inv_norm_factor attention_probs = F.softmax(attention_logits + attention_mask, dim=-1, dtype=hidden_states.dtype) attention_probs = self.attention_dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask attention_probs_reshaped = attention_probs.view(batch_size, self.num_heads, query_length, kv_length) attn_output = (attention_probs_reshaped @ value_layer).flatten(0, 1) attn_output = self._merge_heads(attn_output) attn_output = self.dense(attn_output) if output_attentions: return (attn_output, 
layer_past, attention_probs) else: return (attn_output, layer_past) class FalconFlashAttention2(FalconAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor]=None, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None): fused_qkv = self.query_key_value(hidden_states) num_kv_heads = self.num_heads if self.new_decoder_architecture else self.num_kv_heads (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv) (batch_size, query_length, _, _) = query_layer.shape query_layer = query_layer.transpose(1, 2).reshape(batch_size, self.num_heads, query_length, self.head_dim) key_layer = key_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) value_layer = value_layer.transpose(1, 2).reshape(batch_size, num_kv_heads, query_length, self.head_dim) if alibi is None: if position_embeddings is None: logger.warning_once('The attention layers in this model are transitioning from computing the RoPE embeddings internally through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed `position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be removed and `position_embeddings` will be mandatory.') (cos, sin) = self.rotary_emb(value_layer, position_ids) else: (cos, sin) = position_embeddings (query_layer, key_layer) = apply_rotary_pos_emb(query_layer, key_layer, cos, sin) if layer_past is not None: cache_kwargs = {'cache_position': cache_position} if alibi is None: cache_kwargs.update({'sin': sin, 'cos': cos}) (key_layer, value_layer) = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs) query_layer = query_layer.transpose(1, 2) key_layer = key_layer.transpose(1, 2) value_layer = value_layer.transpose(1, 2) if alibi is not None: raise ValueError('`alibi` is not supported when `use_flash_attn` is True') attn_dropout = self.config.attention_dropout if self.training else 0.0 input_dtype = query_layer.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.query_key_value.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. 
We will cast back the input in {target_dtype}.') query_layer = query_layer.to(target_dtype) key_layer = key_layer.to(target_dtype) value_layer = value_layer.to(target_dtype) attn_output = _flash_attention_forward(query_layer, key_layer, value_layer, attention_mask, query_length, position_ids=position_ids, dropout=attn_dropout, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_weights = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) attn_output = self.dense(attn_weights) if not output_attentions: attn_weights = None return (attn_output, layer_past, attn_weights) class FalconMLP(nn.Module): def __init__(self, config: FalconConfig): super().__init__() hidden_size = config.hidden_size self.dense_h_to_4h = FalconLinear(hidden_size, config.ffn_hidden_size, bias=config.bias) self.act = get_activation(config.activation) self.dense_4h_to_h = FalconLinear(config.ffn_hidden_size, hidden_size, bias=config.bias) self.hidden_dropout = config.hidden_dropout def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.act(self.dense_h_to_4h(x)) x = self.dense_4h_to_h(x) return x FALCON_ATTENTION_CLASSES = {'eager': FalconAttention, 'sdpa': FalconAttention, 'flash_attention_2': FalconFlashAttention2} class FalconDecoderLayer(nn.Module): def __init__(self, config: FalconConfig, layer_idx=None): super().__init__() hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.self_attention = FALCON_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) self.mlp = FalconMLP(config) self.hidden_dropout = config.hidden_dropout self.config = config if config.num_ln_in_parallel_attn is None and config.new_decoder_architecture: config.num_ln_in_parallel_attn = 2 if not config.parallel_attn: self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) elif config.num_ln_in_parallel_attn == 2: self.ln_attn = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.ln_mlp = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) else: self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) def forward(self, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor]=None, layer_past: Optional[Union[Cache, Tuple[torch.Tensor, torch.Tensor]]]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, **kwargs): residual = hidden_states if self.config.new_decoder_architecture and self.config.num_ln_in_parallel_attn == 2: attention_layernorm_out = self.ln_attn(hidden_states) mlp_layernorm_out = self.ln_mlp(hidden_states) else: attention_layernorm_out = self.input_layernorm(hidden_states) attn_outputs = self.self_attention(attention_layernorm_out, layer_past=layer_past, attention_mask=attention_mask, position_ids=position_ids, alibi=alibi, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, position_embeddings=position_embeddings) attention_output = attn_outputs[0] if not self.config.new_decoder_architecture: if self.config.parallel_attn: mlp_layernorm_out = attention_layernorm_out else: residual = dropout_add(attention_output, residual, self.config.attention_dropout, training=self.training) mlp_layernorm_out = 
self.post_attention_layernorm(residual) if self.config.new_decoder_architecture and self.config.parallel_attn and (self.config.num_ln_in_parallel_attn == 1): mlp_layernorm_out = attention_layernorm_out outputs = attn_outputs[1:] mlp_output = self.mlp(mlp_layernorm_out) if self.config.new_decoder_architecture or self.config.parallel_attn: mlp_output += attention_output output = dropout_add(mlp_output, residual, self.config.hidden_dropout, training=self.training) if use_cache: outputs = (output,) + outputs else: outputs = (output,) + outputs[1:] return outputs FALCON_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`FalconConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' FALCON_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[2]`\n (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.\n\n If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance, see our\n [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see\n `past_key_values`).\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. It is used to update the cache in the correct position and to infer\n the complete sequence length.\n" class FalconPreTrainedModel(PreTrainedModel): config_class = FalconConfig base_model_prefix = 'transformer' supports_gradient_checkpointing = True _no_split_modules = ['FalconDecoderLayer'] _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module: nn.Module): if isinstance(module, nn.Linear) or isinstance(module, FalconLinear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @classmethod def _check_and_enable_sdpa(cls, config, hard_check_only: bool=False) -> 'PretrainedConfig': if hard_check_only: if not is_torch_greater_or_equal_than_2_0: raise ImportError('PyTorch SDPA requirements in Transformers are not met. 
Please install torch>=2.0.') if not is_torch_greater_or_equal_than_2_0: return config _is_bettertransformer = getattr(cls, 'use_bettertransformer', False) if _is_bettertransformer: return config if not hard_check_only: config._attn_implementation = 'sdpa' return config @add_start_docstrings('The bare Falcon Model transformer outputting raw hidden-states without any specific head on top.', FALCON_START_DOCSTRING) class FalconModel(FalconPreTrainedModel): def __init__(self, config: FalconConfig): super().__init__(config) self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.use_alibi = config.alibi self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim) self.h = nn.ModuleList([FalconDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' self._use_sdpa = config._attn_implementation == 'sdpa' self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.rotary_emb = FalconRotaryEmbedding(config=config) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.word_embeddings def set_input_embeddings(self, new_embeddings: torch.Tensor): self.word_embeddings = new_embeddings @add_start_docstrings_to_model_forward(FALCON_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) use_legacy_cache = False if use_cache and (not isinstance(past_key_values, Cache)): use_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) if not self.training: logger.warning_once('We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.45. 
Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/internal/generation_utils#transformers.Cache)') alibi = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 (batch_size, seq_length, _) = inputs_embeds.shape if self.use_alibi: mask = torch.ones((batch_size, seq_length + past_key_values_length), device=inputs_embeds.device, dtype=torch.long) if attention_mask is None else attention_mask alibi = build_alibi_tensor(mask, self.num_heads, dtype=inputs_embeds.dtype) if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions, head_mask, alibi) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) next_decoder_cache = None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, block) in enumerate(self.h): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, alibi, causal_mask, position_ids, head_mask[i], past_key_values, use_cache, output_attentions, cache_position, position_embeddings) else: outputs = block(hidden_states, layer_past=past_key_values, attention_mask=causal_mask, position_ids=position_ids, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, cache_position=cache_position, position_embeddings=position_embeddings) hidden_states = outputs[0] if use_cache is True: next_decoder_cache = outputs[1] if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attentions) def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool, head_mask: torch.Tensor, alibi: torch.Tensor): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions) and (head_mask is None) and (alibi is None): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min 
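# Editor's note (added descriptive comment, not part of the original file): the code that
# follows builds an additive 4D mask of shape (batch_size, 1, sequence_length, target_length)
# in which visible positions hold 0 and masked positions hold `min_dtype`, the most negative
# value representable in the compute dtype, so they vanish after the softmax. When the model
# runs with alibi (and no head_mask is given), the alibi biases are scaled by 1/sqrt(head_dim)
# and written into the visible positions while masked positions keep `min_dtype`. For SDPA on
# CUDA, rows that are fully masked are then "unmasked" via
# AttentionMaskConverter._unmask_unattended to avoid NaNs from rows that attend to nothing.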
(batch_size, sequence_length, _) = input_tensor.shape if using_static_cache: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if head_mask is None and alibi is not None: alibi = alibi.reshape(batch_size, -1, *alibi.shape[1:]) causal_mask = torch.masked_fill(alibi / math.sqrt(self.config.hidden_size // self.num_heads), causal_mask < -1, min_dtype) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions): causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @add_start_docstrings('The Falcon Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).', FALCON_START_DOCSTRING) class FalconForCausalLM(FalconPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config: FalconConfig): super().__init__(config) self.transformer = FalconModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings: torch.Tensor): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, past_key_values: Optional[Union[Cache, torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: bool=True, **kwargs) -> dict: if past_key_values is not None: if inputs_embeds is not None: input_ids = input_ids[:, -cache_position.shape[0]:] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if not self.transformer.use_alibi and attention_mask is not None and (position_ids is None): position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] position_ids = position_ids.clone(memory_format=torch.contiguous_format) if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None} else: model_inputs = {'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None} if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2: if model_inputs['inputs_embeds'] is not None: (batch_size, sequence_length, _) = model_inputs['inputs_embeds'].shape device = model_inputs['inputs_embeds'].device else: (batch_size, sequence_length) = model_inputs['input_ids'].shape device = model_inputs['input_ids'].device dtype = self.lm_head.weight.dtype min_dtype = torch.finfo(dtype).min attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_length(), dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=batch_size) model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': 
use_cache, 'attention_mask': attention_mask}) return model_inputs @add_start_docstrings_to_model_forward(FALCON_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() (batch_size, seq_length, vocab_size) = shift_logits.shape loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def _reorder_cache(self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: device_to_beam_idx = {past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past} reordered_past = tuple(((layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]), layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device])) for layer_past in past)) return reordered_past @add_start_docstrings('\n The Falcon Model transformer with a sequence classification head on top (linear layer).\n\n [`FalconForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', FALCON_START_DOCSTRING) class FalconForSequenceClassification(FalconPreTrainedModel): def __init__(self, config: FalconConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = FalconModel(config) self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False) self.post_init() @add_start_docstrings_to_model_forward(FALCON_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n Falcon Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', FALCON_START_DOCSTRING) class FalconForTokenClassification(FalconPreTrainedModel): def __init__(self, config: FalconConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = FalconModel(config) if getattr(config, 'classifier_dropout', None) is not None: classifier_dropout = config.classifier_dropout elif getattr(config, 'hidden_dropout', None) is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(FALCON_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: (batch_size, seq_length) = labels.shape loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)) if not return_dict: output = 
(logits,) + transformer_outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n The Falcon Model transformer with a span classification head on top for extractive question-answering tasks like\n SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', FALCON_START_DOCSTRING) class FalconForQuestionAnswering(FalconPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = FalconModel(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.post_init() @add_start_docstrings_to_model_forward(FALCON_INPUTS_DOCSTRING) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/falcon_mamba/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_falcon_mamba': ['FalconMambaConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_falcon_mamba'] = ['FalconMambaForCausalLM', 'FalconMambaModel', 'FalconMambaPreTrainedModel'] if TYPE_CHECKING: from .configuration_falcon_mamba import FalconMambaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon_mamba import FalconMambaForCausalLM, FalconMambaModel, FalconMambaPreTrainedModel else: import sys sys.modules[__name__] 
= _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/falcon_mamba/configuration_falcon_mamba.py """""" import math from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class FalconMambaConfig(PretrainedConfig): model_type = 'falcon_mamba' def __init__(self, vocab_size=50280, hidden_size=768, state_size=16, num_hidden_layers=32, layer_norm_epsilon=1e-05, pad_token_id=0, bos_token_id=0, eos_token_id=0, expand=2, conv_kernel=4, use_bias=False, use_conv_bias=True, hidden_act='silu', initializer_range=0.1, residual_in_fp32=True, time_step_rank='auto', time_step_scale=1.0, time_step_min=0.001, time_step_max=0.1, time_step_init_scheme='random', time_step_floor=0.0001, rescale_prenorm_residual=False, use_cache=True, use_mambapy=False, mixer_rms_eps=1e-06, **kwargs): self.vocab_size = vocab_size self.hidden_size = hidden_size self.state_size = state_size self.num_hidden_layers = num_hidden_layers self.layer_norm_epsilon = layer_norm_epsilon self.conv_kernel = conv_kernel self.expand = expand self.intermediate_size = int(expand * self.hidden_size) self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.use_bias = use_bias self.use_conv_bias = use_conv_bias self.hidden_act = hidden_act self.initializer_range = initializer_range self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == 'auto' else time_step_rank self.time_step_scale = time_step_scale self.time_step_min = time_step_min self.time_step_max = time_step_max self.time_step_init_scheme = time_step_init_scheme self.time_step_floor = time_step_floor self.rescale_prenorm_residual = rescale_prenorm_residual self.residual_in_fp32 = residual_in_fp32 self.use_cache = use_cache self.use_mambapy = use_mambapy self.mixer_rms_eps = mixer_rms_eps super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs) # File: transformers-main/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py """""" import math from dataclasses import dataclass from typing import Any, Dict, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import MambaCache from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from ...utils.import_utils import is_causal_conv1d_available, is_mamba_ssm_available, is_mambapy_available from .configuration_falcon_mamba import FalconMambaConfig logger = logging.get_logger(__name__) if is_mambapy_available(): from mambapy.pscan import pscan else: pscan = None if is_mamba_ssm_available(): from mamba_ssm.ops.selective_scan_interface import selective_scan_fn from mamba_ssm.ops.triton.selective_state_update import selective_state_update from ...kernels.falcon_mamba import mamba_inner_fn else: (selective_state_update, selective_scan_fn, mamba_inner_fn) = (None, None, None) if is_causal_conv1d_available(): from causal_conv1d import causal_conv1d_fn, causal_conv1d_update else: (causal_conv1d_update, causal_conv1d_fn) = (None, None) is_fast_path_available = all((selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)) _CHECKPOINT_FOR_DOC = 'tiiuae/falcon-mamba-7b' _CONFIG_FOR_DOC = 'FalconMambaConfig' 
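# Editor's note (added comment, not part of the original file): `rms_forward` below is a
# parameter-free RMS normalization computed in float32 and cast back to the input dtype,
# roughly:
#     y = x.float() * torch.rsqrt(x.float().pow(2).mean(-1, keepdim=True) + eps)
# A key departure of FalconMamba from vanilla Mamba is that this normalization is applied to
# the B, C and dt (time-step) projections inside every mixer block (see FalconMambaMixer
# below); on the fused CUDA path the same effect is obtained by passing the `b_c_rms` /
# `dt_rms` buffers together with `mixer_rms_eps` to `mamba_inner_fn`.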
def rms_forward(hidden_states, variance_epsilon=1e-06): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + variance_epsilon) return hidden_states.to(input_dtype) class FalconMambaMixer(nn.Module): def __init__(self, config: FalconMambaConfig, layer_idx: int): super().__init__() self.config = config self.hidden_size = config.hidden_size self.ssm_state_size = config.state_size self.conv_kernel_size = config.conv_kernel self.intermediate_size = config.intermediate_size self.time_step_rank = int(config.time_step_rank) self.layer_idx = layer_idx self.use_conv_bias = config.use_conv_bias self.conv1d = nn.Conv1d(in_channels=self.intermediate_size, out_channels=self.intermediate_size, bias=config.use_conv_bias, kernel_size=config.conv_kernel, groups=self.intermediate_size, padding=config.conv_kernel - 1) self.activation = config.hidden_act self.act = ACT2FN[config.hidden_act] self.use_mambapy = config.use_mambapy self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=config.use_bias) self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False) self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True) A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :] A = A.expand(self.intermediate_size, -1).contiguous() self.A_log = nn.Parameter(torch.log(A)) self.D = nn.Parameter(torch.ones(self.intermediate_size)) self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias) self.use_bias = config.use_bias self.register_buffer('b_c_rms', torch.nn.Parameter(torch.ones(self.ssm_state_size), requires_grad=False), persistent=False) self.register_buffer('dt_rms', torch.nn.Parameter(torch.ones(self.intermediate_size), requires_grad=False), persistent=False) self.rms_eps = config.mixer_rms_eps if not is_fast_path_available: if self.use_mambapy: if is_mambapy_available(): logger.warning_once('The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)` is None. Falling back to the mamba.py backend. To install follow https://github.com/state-spaces/mamba/#installation and https://github.com/Dao-AILab/causal-conv1d') else: raise ImportError('use_mambapy is set to True but the mambapy package is not installed. To install it follow https://github.com/alxndrTL/mamba.py.') else: logger.warning_once('The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)` is None. Falling back to the sequential implementation of Mamba, as use_mambapy is set to False. To install follow https://github.com/state-spaces/mamba/#installation and https://github.com/Dao-AILab/causal-conv1d. 
For the mamba.py backend, follow https://github.com/alxndrTL/mamba.py.') def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None): projected_states = self.in_proj(hidden_states).transpose(1, 2) if self.training and cache_params is None: contextualized_states = mamba_inner_fn(projected_states, self.conv1d.weight, self.conv1d.bias if self.use_conv_bias else None, self.x_proj.weight, self.dt_proj.weight, self.out_proj.weight, self.out_proj.bias.float() if self.use_bias else None, -torch.exp(self.A_log.float()), None, None, self.D.float(), delta_bias=self.dt_proj.bias.float(), delta_softplus=True, b_rms_weight=self.b_c_rms, c_rms_weight=self.b_c_rms, dt_rms_weight=self.dt_rms, b_c_dt_rms_eps=self.rms_eps) else: (hidden_states, gate) = projected_states.chunk(2, dim=1) if attention_mask is not None: hidden_states = hidden_states * attention_mask.unsqueeze(1) conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2)) if cache_params is not None and cache_position[0] > 0: hidden_states = causal_conv1d_update(hidden_states.squeeze(-1), cache_params.conv_states[self.layer_idx], conv_weights, self.conv1d.bias, self.activation) hidden_states = hidden_states.unsqueeze(-1) else: if cache_params is not None: conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)) cache_params.update_conv_state(self.layer_idx, conv_states, cache_position) hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation) if attention_mask is not None: hidden_states = hidden_states * attention_mask.unsqueeze(1) ssm_parameters = self.x_proj(hidden_states.transpose(1, 2)) (time_step, B, C) = torch.split(ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1) B = rms_forward(B, variance_epsilon=self.rms_eps) C = rms_forward(C, variance_epsilon=self.rms_eps) time_step = rms_forward(time_step, variance_epsilon=self.rms_eps) if hasattr(self.config, '_pre_quantization_dtype'): discrete_time_step = (self.dt_proj(time_step) - self.dt_proj.bias).transpose(1, 2) else: discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2) A = -torch.exp(self.A_log.float()) time_proj_bias = self.dt_proj.bias.float() if hasattr(self.dt_proj, 'bias') else None if cache_params is not None and cache_position[0] > 0: scan_outputs = selective_state_update(cache_params.ssm_states[self.layer_idx], hidden_states[..., 0], discrete_time_step[..., 0], A, B[:, 0], C[:, 0], self.D, gate[..., 0], time_proj_bias, dt_softplus=True).unsqueeze(-1) else: (scan_outputs, ssm_state) = selective_scan_fn(hidden_states, discrete_time_step, A, B.transpose(1, 2), C.transpose(1, 2), self.D.float(), gate, time_proj_bias, delta_softplus=True, return_last_state=True) if ssm_state is not None and cache_params is not None: cache_params.update_ssm_state(self.layer_idx, ssm_state) contextualized_states = self.out_proj(scan_outputs.transpose(1, 2)) return contextualized_states def slow_forward(self, input_states, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None): (batch_size, seq_len, _) = input_states.shape dtype = input_states.dtype projected_states = self.in_proj(input_states).transpose(1, 2) (hidden_states, gate) = projected_states.chunk(2, dim=1) if attention_mask is not None: hidden_states 
= hidden_states * attention_mask.unsqueeze(1) if cache_params is not None: ssm_state = cache_params.ssm_states[self.layer_idx].clone() ssm_state = ssm_state.to(hidden_states.device) if cache_position is not None and cache_position.shape[0] == self.conv_kernel_size: conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)) cache_params.update_conv_state(self.layer_idx, conv_state, cache_position) hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) else: conv_state = cache_params.update_conv_state(self.layer_idx, hidden_states, cache_position) hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1) if self.use_conv_bias: hidden_states += self.conv1d.bias hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1) else: ssm_state = torch.zeros((batch_size, self.intermediate_size, self.ssm_state_size), device=hidden_states.device, dtype=dtype) hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) if attention_mask is not None: hidden_states = hidden_states * attention_mask.unsqueeze(1) ssm_parameters = self.x_proj(hidden_states.transpose(1, 2)) (time_step, B, C) = torch.split(ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1) B = rms_forward(B, variance_epsilon=self.rms_eps) C = rms_forward(C, variance_epsilon=self.rms_eps) time_step = rms_forward(time_step, variance_epsilon=self.rms_eps) discrete_time_step = self.dt_proj(time_step) discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2) A = -torch.exp(self.A_log.float()) discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None]) discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float() deltaB_u = discrete_B * hidden_states[:, :, :, None].float() if self.use_mambapy and self.training and (cache_params is None): hs = pscan(discrete_A.transpose(1, 2), deltaB_u.transpose(1, 2)) scan_output = (hs @ C.unsqueeze(-1)).squeeze(3).transpose(1, 2) scan_output = scan_output + hidden_states * self.D[None, :, None] scan_output = scan_output * self.act(gate) else: scan_outputs = [] for i in range(seq_len): ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :] scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1)) scan_outputs.append(scan_output[:, :, 0]) scan_output = torch.stack(scan_outputs, dim=-1) scan_output = scan_output + hidden_states * self.D[None, :, None] scan_output = scan_output * self.act(gate) if cache_params is not None: cache_params.update_ssm_state(self.layer_idx, ssm_state) contextualized_states = self.out_proj(scan_output.transpose(1, 2)) return contextualized_states def forward(self, hidden_states, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None): if is_fast_path_available and 'cuda' in self.x_proj.weight.device.type and (not torch._dynamo.is_compiling()): return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask) return self.slow_forward(hidden_states, cache_params, cache_position, attention_mask) class FalconMambaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def extra_repr(self): return f'{self.weight.shape[0]}, eps={self.variance_epsilon}' def forward(self, hidden_states): return self.weight.to(hidden_states.device) * rms_forward(hidden_states, 
variance_epsilon=self.variance_epsilon) class FalconMambaBlock(nn.Module): def __init__(self, config, layer_idx): super().__init__() self.config = config self.layer_idx = layer_idx self.residual_in_fp32 = config.residual_in_fp32 self.norm = FalconMambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.mixer = FalconMambaMixer(config, layer_idx=layer_idx) def forward(self, hidden_states, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None): residual = hidden_states hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype)) if self.residual_in_fp32: residual = residual.to(torch.float32) hidden_states = self.mixer(hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask) hidden_states = residual + hidden_states return hidden_states class FalconMambaPreTrainedModel(PreTrainedModel): config_class = FalconMambaConfig base_model_prefix = 'backbone' _no_split_modules = ['FalconMambaBlock', 'FalconMambaMixer'] supports_gradient_checkpointing = True _is_stateful = True def _init_weights(self, module): if isinstance(module, FalconMambaMixer): module.A_log._no_weight_decay = True module.D._no_weight_decay = True dt_init_std = self.config.time_step_rank ** (-0.5) * self.config.time_step_scale if self.config.time_step_init_scheme == 'constant': nn.init.constant_(module.dt_proj.weight, dt_init_std) elif self.config.time_step_init_scheme == 'random': nn.init.uniform_(module.dt_proj.weight, -dt_init_std, dt_init_std) dt = torch.exp(torch.rand(self.config.intermediate_size) * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + math.log(self.config.time_step_min)).clamp(min=self.config.time_step_floor) inv_dt = dt + torch.log(-torch.expm1(-dt)) with torch.no_grad(): module.dt_proj.bias.copy_(inv_dt) module.dt_proj.bias._no_reinit = True if isinstance(module, nn.Linear): if module.bias is not None: if not getattr(module.bias, '_no_reinit', False): nn.init.zeros_(module.bias) elif isinstance(module, nn.Embedding): nn.init.normal_(module.weight, std=self.config.initializer_range) if self.config.rescale_prenorm_residual: for (name, p) in module.named_parameters(): if name in ['out_proj.weight']: nn.init.kaiming_uniform_(p, a=math.sqrt(5)) with torch.no_grad(): p /= math.sqrt(self.config.num_hidden_layers) @dataclass class FalconMambaOutput(ModelOutput): last_hidden_state: Optional[torch.FloatTensor] = None cache_params: Optional[MambaCache] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class FalconMambaCausalLMOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None cache_params: Optional[MambaCache] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None FALCONMAMBA_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`FalconMambaConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' FALCONMAMBA_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n If `cache_params.seqlen_offset>0`, only `input_ids` that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n cache_params (`MambaCache`, *optional*):\n If passed along, the model uses the previous state in all the blocks (which will give the output for the\n `input_ids` provided as if the model add `state_input_ids + input_ids` as context).\n use_cache (`bool`, *optional*):\n If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare FALCONMAMBA Model transformer outputting raw hidden-states without any specific head on top.', FALCONMAMBA_START_DOCSTRING) class FalconMambaModel(FalconMambaPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([FalconMambaBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)]) self.gradient_checkpointing = False self.norm_f = FalconMambaRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.post_init() def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, new_embeddings): self.embeddings = new_embeddings @add_start_docstrings_to_model_forward(FALCONMAMBA_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=FalconMambaOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, cache_params: Optional[MambaCache]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None) -> Union[Tuple, FalconMambaOutput]: output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache if not self.training else False return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids) if self.gradient_checkpointing and self.training and use_cache: use_cache = False if use_cache: if cache_params is None: cache_params = MambaCache(self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype) cache_position = torch.arange(0, self.config.conv_kernel, device=inputs_embeds.device) elif cache_position is None: raise ValueError("You have to specify the `cache_position` manually when `use_cache=True` and `cache_params` is passed, you don't have to pass a `cache_params` if you are in prefilling stage because in that case it will be initialized for you automatically") else: cache_params = None hidden_states = inputs_embeds all_hidden_states = () if output_hidden_states else None for mixer_block in self.layers: if self.gradient_checkpointing and self.training: hidden_states = self._gradient_checkpointing_func(mixer_block.__call__, hidden_states, cache_params, cache_position, attention_mask) else: hidden_states = mixer_block(hidden_states, cache_params=cache_params, cache_position=cache_position, attention_mask=attention_mask) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = self.norm_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)) return FalconMambaOutput(last_hidden_state=hidden_states, cache_params=cache_params if use_cache else None, 
hidden_states=all_hidden_states) @add_start_docstrings('\n The FALCONMAMBA Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', FALCONMAMBA_START_DOCSTRING) class FalconMambaForCausalLM(FalconMambaPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.backbone = FalconMambaModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_input_embeddings(self): return self.backbone.get_input_embeddings() def set_input_embeddings(self, new_embeddings): return self.backbone.set_input_embeddings(new_embeddings) def _update_model_kwargs_for_generation(self, outputs: ModelOutput, model_kwargs: Dict[str, Any], num_new_tokens: int=1, **kwargs) -> Dict[str, Any]: model_kwargs['cache_params'] = outputs.get('cache_params', None) if model_kwargs.get('use_cache', True) and 'cache_position' in model_kwargs and (model_kwargs['cache_position'] is not None): model_kwargs['cache_position'] = model_kwargs['cache_position'][-1:] + num_new_tokens if 'attention_mask' in model_kwargs: attention_mask = model_kwargs['attention_mask'] model_kwargs['attention_mask'] = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1) return model_kwargs def prepare_inputs_for_generation(self, input_ids, inputs_embeds=None, use_cache=None, cache_params: Optional[MambaCache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, **kwargs): if use_cache: if cache_position is None: raise ValueError('`cache_position` should not be None as it should have been initialized in `model.generate`, you are responsible for passing in a valid `cache_position` if you are calling `prepare_inputs_for_generation` directly with `use_cache=True`') if cache_position[0] > 0: input_ids = input_ids[:, -1].unsqueeze(-1) if attention_mask is not None: attention_mask = None else: cache_position = torch.arange(0, self.config.conv_kernel, device=input_ids.device) if inputs_embeds is not None and cache_params is None: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids.contiguous()} model_inputs.update({'cache_params': cache_params, 'use_cache': use_cache, 'cache_position': cache_position, 'attention_mask': attention_mask}) return model_inputs @add_start_docstrings_to_model_forward(FALCONMAMBA_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=FalconMambaCausalLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_params: Optional[MambaCache]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[Tuple, FalconMambaCausalLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict falcon_mamba_outputs = self.backbone(input_ids, cache_params=cache_params, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, cache_position=cache_position, attention_mask=attention_mask) hidden_states = 
falcon_mamba_outputs[0] logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float() loss = None if labels is not None: labels = labels.to(logits.device) shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (logits,) + falcon_mamba_outputs[1:] return (loss,) + output if loss is not None else output return FalconMambaCausalLMOutput(loss=loss, logits=logits, cache_params=falcon_mamba_outputs.cache_params, hidden_states=falcon_mamba_outputs.hidden_states) # File: transformers-main/src/transformers/models/fastspeech2_conformer/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_fastspeech2_conformer': ['FastSpeech2ConformerConfig', 'FastSpeech2ConformerHifiGanConfig', 'FastSpeech2ConformerWithHifiGanConfig'], 'tokenization_fastspeech2_conformer': ['FastSpeech2ConformerTokenizer']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_fastspeech2_conformer'] = ['FastSpeech2ConformerWithHifiGan', 'FastSpeech2ConformerHifiGan', 'FastSpeech2ConformerModel', 'FastSpeech2ConformerPreTrainedModel'] if TYPE_CHECKING: from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig from .tokenization_fastspeech2_conformer import FastSpeech2ConformerTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fastspeech2_conformer import FastSpeech2ConformerHifiGan, FastSpeech2ConformerModel, FastSpeech2ConformerPreTrainedModel, FastSpeech2ConformerWithHifiGan else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py """""" from typing import Dict from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class FastSpeech2ConformerConfig(PretrainedConfig): model_type = 'fastspeech2_conformer' attribute_map = {'num_hidden_layers': 'encoder_layers', 'num_attention_heads': 'encoder_num_attention_heads'} def __init__(self, hidden_size=384, vocab_size=78, num_mel_bins=80, encoder_num_attention_heads=2, encoder_layers=4, encoder_linear_units=1536, decoder_layers=4, decoder_num_attention_heads=2, decoder_linear_units=1536, speech_decoder_postnet_layers=5, speech_decoder_postnet_units=256, speech_decoder_postnet_kernel=5, positionwise_conv_kernel_size=3, encoder_normalize_before=False, decoder_normalize_before=False, encoder_concat_after=False, decoder_concat_after=False, reduction_factor=1, speaking_speed=1.0, use_macaron_style_in_conformer=True, use_cnn_in_conformer=True, encoder_kernel_size=7, decoder_kernel_size=31, duration_predictor_layers=2, duration_predictor_channels=256, duration_predictor_kernel_size=3, energy_predictor_layers=2, energy_predictor_channels=256, energy_predictor_kernel_size=3, energy_predictor_dropout=0.5, energy_embed_kernel_size=1, energy_embed_dropout=0.0, stop_gradient_from_energy_predictor=False, pitch_predictor_layers=5, pitch_predictor_channels=256, 
pitch_predictor_kernel_size=5, pitch_predictor_dropout=0.5, pitch_embed_kernel_size=1, pitch_embed_dropout=0.0, stop_gradient_from_pitch_predictor=True, encoder_dropout_rate=0.2, encoder_positional_dropout_rate=0.2, encoder_attention_dropout_rate=0.2, decoder_dropout_rate=0.2, decoder_positional_dropout_rate=0.2, decoder_attention_dropout_rate=0.2, duration_predictor_dropout_rate=0.2, speech_decoder_postnet_dropout=0.5, max_source_positions=5000, use_masking=True, use_weighted_masking=False, num_speakers=None, num_languages=None, speaker_embed_dim=None, is_encoder_decoder=True, **kwargs): if positionwise_conv_kernel_size % 2 == 0: raise ValueError(f'positionwise_conv_kernel_size must be odd, but got {positionwise_conv_kernel_size} instead.') if encoder_kernel_size % 2 == 0: raise ValueError(f'encoder_kernel_size must be odd, but got {encoder_kernel_size} instead.') if decoder_kernel_size % 2 == 0: raise ValueError(f'decoder_kernel_size must be odd, but got {decoder_kernel_size} instead.') if duration_predictor_kernel_size % 2 == 0: raise ValueError(f'duration_predictor_kernel_size must be odd, but got {duration_predictor_kernel_size} instead.') if energy_predictor_kernel_size % 2 == 0: raise ValueError(f'energy_predictor_kernel_size must be odd, but got {energy_predictor_kernel_size} instead.') if energy_embed_kernel_size % 2 == 0: raise ValueError(f'energy_embed_kernel_size must be odd, but got {energy_embed_kernel_size} instead.') if pitch_predictor_kernel_size % 2 == 0: raise ValueError(f'pitch_predictor_kernel_size must be odd, but got {pitch_predictor_kernel_size} instead.') if pitch_embed_kernel_size % 2 == 0: raise ValueError(f'pitch_embed_kernel_size must be odd, but got {pitch_embed_kernel_size} instead.') if hidden_size % encoder_num_attention_heads != 0: raise ValueError('The hidden_size must be evenly divisible by encoder_num_attention_heads.') if hidden_size % decoder_num_attention_heads != 0: raise ValueError('The hidden_size must be evenly divisible by decoder_num_attention_heads.') if use_masking and use_weighted_masking: raise ValueError('Either use_masking or use_weighted_masking can be True, but not both.') self.hidden_size = hidden_size self.vocab_size = vocab_size self.num_mel_bins = num_mel_bins self.encoder_config = {'num_attention_heads': encoder_num_attention_heads, 'layers': encoder_layers, 'kernel_size': encoder_kernel_size, 'attention_dropout_rate': encoder_attention_dropout_rate, 'dropout_rate': encoder_dropout_rate, 'positional_dropout_rate': encoder_positional_dropout_rate, 'linear_units': encoder_linear_units, 'normalize_before': encoder_normalize_before, 'concat_after': encoder_concat_after} self.decoder_config = {'num_attention_heads': decoder_num_attention_heads, 'layers': decoder_layers, 'kernel_size': decoder_kernel_size, 'attention_dropout_rate': decoder_attention_dropout_rate, 'dropout_rate': decoder_dropout_rate, 'positional_dropout_rate': decoder_positional_dropout_rate, 'linear_units': decoder_linear_units, 'normalize_before': decoder_normalize_before, 'concat_after': decoder_concat_after} self.encoder_num_attention_heads = encoder_num_attention_heads self.encoder_layers = encoder_layers self.duration_predictor_channels = duration_predictor_channels self.duration_predictor_kernel_size = duration_predictor_kernel_size self.duration_predictor_layers = duration_predictor_layers self.energy_embed_dropout = energy_embed_dropout self.energy_embed_kernel_size = energy_embed_kernel_size self.energy_predictor_channels = energy_predictor_channels 
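# NOTE: illustrative sketch of how the validation above behaves; the import below is the
# public `transformers` API, but the specific values are made up for demonstration.
# Every kernel size must be odd so that the symmetric padding of (kernel_size - 1) // 2 used
# by the convolution layers preserves the time dimension, and hidden_size must be divisible
# by the number of attention heads so each head gets an integer dimension (384 // 2 = 192
# with the defaults).
#
#     from transformers import FastSpeech2ConformerConfig
#
#     FastSpeech2ConformerConfig()                               # default kernel sizes are odd -> OK
#     FastSpeech2ConformerConfig(encoder_kernel_size=8)          # raises ValueError: must be odd
#     FastSpeech2ConformerConfig(encoder_num_attention_heads=5)  # raises ValueError: 384 % 5 != 0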
self.energy_predictor_dropout = energy_predictor_dropout self.energy_predictor_kernel_size = energy_predictor_kernel_size self.energy_predictor_layers = energy_predictor_layers self.pitch_embed_dropout = pitch_embed_dropout self.pitch_embed_kernel_size = pitch_embed_kernel_size self.pitch_predictor_channels = pitch_predictor_channels self.pitch_predictor_dropout = pitch_predictor_dropout self.pitch_predictor_kernel_size = pitch_predictor_kernel_size self.pitch_predictor_layers = pitch_predictor_layers self.positionwise_conv_kernel_size = positionwise_conv_kernel_size self.speech_decoder_postnet_units = speech_decoder_postnet_units self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout self.speech_decoder_postnet_kernel = speech_decoder_postnet_kernel self.speech_decoder_postnet_layers = speech_decoder_postnet_layers self.reduction_factor = reduction_factor self.speaking_speed = speaking_speed self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor self.max_source_positions = max_source_positions self.use_cnn_in_conformer = use_cnn_in_conformer self.use_macaron_style_in_conformer = use_macaron_style_in_conformer self.use_masking = use_masking self.use_weighted_masking = use_weighted_masking self.num_speakers = num_speakers self.num_languages = num_languages self.speaker_embed_dim = speaker_embed_dim self.duration_predictor_dropout_rate = duration_predictor_dropout_rate self.is_encoder_decoder = is_encoder_decoder super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) class FastSpeech2ConformerHifiGanConfig(PretrainedConfig): model_type = 'hifigan' def __init__(self, model_in_dim=80, upsample_initial_channel=512, upsample_rates=[8, 8, 2, 2], upsample_kernel_sizes=[16, 16, 4, 4], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], initializer_range=0.01, leaky_relu_slope=0.1, normalize_before=True, **kwargs): self.model_in_dim = model_in_dim self.upsample_initial_channel = upsample_initial_channel self.upsample_rates = upsample_rates self.upsample_kernel_sizes = upsample_kernel_sizes self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes self.initializer_range = initializer_range self.leaky_relu_slope = leaky_relu_slope self.normalize_before = normalize_before super().__init__(**kwargs) class FastSpeech2ConformerWithHifiGanConfig(PretrainedConfig): model_type = 'fastspeech2_conformer_with_hifigan' is_composition = True def __init__(self, model_config: Dict=None, vocoder_config: Dict=None, **kwargs): if model_config is None: model_config = {} logger.info('model_config is None. initializing the model with default values.') if vocoder_config is None: vocoder_config = {} logger.info('vocoder_config is None. 
initializing the coarse model with default values.') self.model_config = FastSpeech2ConformerConfig(**model_config) self.vocoder_config = FastSpeech2ConformerHifiGanConfig(**vocoder_config) super().__init__(**kwargs) # File: transformers-main/src/transformers/models/fastspeech2_conformer/convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch.py """""" import argparse import json import re from pathlib import Path from tempfile import TemporaryDirectory import torch import yaml from transformers import FastSpeech2ConformerConfig, FastSpeech2ConformerModel, FastSpeech2ConformerTokenizer, logging logging.set_verbosity_info() logger = logging.get_logger('transformers.models.FastSpeech2Conformer') CONFIG_MAPPING = {'adim': 'hidden_size', 'aheads': 'num_attention_heads', 'conformer_dec_kernel_size': 'decoder_kernel_size', 'conformer_enc_kernel_size': 'encoder_kernel_size', 'decoder_normalize_before': 'decoder_normalize_before', 'dlayers': 'decoder_layers', 'dunits': 'decoder_linear_units', 'duration_predictor_chans': 'duration_predictor_channels', 'duration_predictor_kernel_size': 'duration_predictor_kernel_size', 'duration_predictor_layers': 'duration_predictor_layers', 'elayers': 'encoder_layers', 'encoder_normalize_before': 'encoder_normalize_before', 'energy_embed_dropout': 'energy_embed_dropout', 'energy_embed_kernel_size': 'energy_embed_kernel_size', 'energy_predictor_chans': 'energy_predictor_channels', 'energy_predictor_dropout': 'energy_predictor_dropout', 'energy_predictor_kernel_size': 'energy_predictor_kernel_size', 'energy_predictor_layers': 'energy_predictor_layers', 'eunits': 'encoder_linear_units', 'pitch_embed_dropout': 'pitch_embed_dropout', 'pitch_embed_kernel_size': 'pitch_embed_kernel_size', 'pitch_predictor_chans': 'pitch_predictor_channels', 'pitch_predictor_dropout': 'pitch_predictor_dropout', 'pitch_predictor_kernel_size': 'pitch_predictor_kernel_size', 'pitch_predictor_layers': 'pitch_predictor_layers', 'positionwise_conv_kernel_size': 'positionwise_conv_kernel_size', 'postnet_chans': 'speech_decoder_postnet_units', 'postnet_filts': 'speech_decoder_postnet_kernel', 'postnet_layers': 'speech_decoder_postnet_layers', 'reduction_factor': 'reduction_factor', 'stop_gradient_from_energy_predictor': 'stop_gradient_from_energy_predictor', 'stop_gradient_from_pitch_predictor': 'stop_gradient_from_pitch_predictor', 'transformer_dec_attn_dropout_rate': 'decoder_attention_dropout_rate', 'transformer_dec_dropout_rate': 'decoder_dropout_rate', 'transformer_dec_positional_dropout_rate': 'decoder_positional_dropout_rate', 'transformer_enc_attn_dropout_rate': 'encoder_attention_dropout_rate', 'transformer_enc_dropout_rate': 'encoder_dropout_rate', 'transformer_enc_positional_dropout_rate': 'encoder_positional_dropout_rate', 'use_cnn_in_conformer': 'use_cnn_in_conformer', 'use_macaron_style_in_conformer': 'use_macaron_style_in_conformer', 'use_masking': 'use_masking', 'use_weighted_masking': 'use_weighted_masking', 'idim': 'input_dim', 'odim': 'num_mel_bins', 'spk_embed_dim': 'speaker_embed_dim', 'langs': 'num_languages', 'spks': 'num_speakers'} def remap_model_yaml_config(yaml_config_path): with Path(yaml_config_path).open('r', encoding='utf-8') as f: args = yaml.safe_load(f) args = argparse.Namespace(**args) remapped_config = {} model_params = args.tts_conf['text2mel_params'] for (espnet_config_key, hf_config_key) in CONFIG_MAPPING.items(): if espnet_config_key in model_params: remapped_config[hf_config_key] = model_params[espnet_config_key] return (remapped_config, 
args.g2p, args.token_list) def convert_espnet_state_dict_to_hf(state_dict): new_state_dict = {} for key in state_dict: if 'tts.generator.text2mel.' in key: new_key = key.replace('tts.generator.text2mel.', '') if 'postnet' in key: new_key = new_key.replace('postnet.postnet', 'speech_decoder_postnet.layers') new_key = new_key.replace('.0.weight', '.conv.weight') new_key = new_key.replace('.1.weight', '.batch_norm.weight') new_key = new_key.replace('.1.bias', '.batch_norm.bias') new_key = new_key.replace('.1.running_mean', '.batch_norm.running_mean') new_key = new_key.replace('.1.running_var', '.batch_norm.running_var') new_key = new_key.replace('.1.num_batches_tracked', '.batch_norm.num_batches_tracked') if 'feat_out' in key: if 'weight' in key: new_key = 'speech_decoder_postnet.feat_out.weight' if 'bias' in key: new_key = 'speech_decoder_postnet.feat_out.bias' if 'encoder.embed.0.weight' in key: new_key = new_key.replace('0.', '') if 'w_1' in key: new_key = new_key.replace('w_1', 'conv1') if 'w_2' in key: new_key = new_key.replace('w_2', 'conv2') if 'predictor.conv' in key: new_key = new_key.replace('.conv', '.conv_layers') pattern = '(\\d)\\.(\\d)' replacement = '\\1.conv' if '2.weight' not in new_key and '2.bias' not in new_key else '\\1.layer_norm' new_key = re.sub(pattern, replacement, new_key) if 'pitch_embed' in key or 'energy_embed' in key: new_key = new_key.replace('0', 'conv') if 'encoders' in key: new_key = new_key.replace('encoders', 'conformer_layers') new_key = new_key.replace('norm_final', 'final_layer_norm') new_key = new_key.replace('norm_mha', 'self_attn_layer_norm') new_key = new_key.replace('norm_ff_macaron', 'ff_macaron_layer_norm') new_key = new_key.replace('norm_ff', 'ff_layer_norm') new_key = new_key.replace('norm_conv', 'conv_layer_norm') if 'lid_emb' in key: new_key = new_key.replace('lid_emb', 'language_id_embedding') if 'sid_emb' in key: new_key = new_key.replace('sid_emb', 'speaker_id_embedding') new_state_dict[new_key] = state_dict[key] return new_state_dict @torch.no_grad() def convert_FastSpeech2ConformerModel_checkpoint(checkpoint_path, yaml_config_path, pytorch_dump_folder_path, repo_id=None): (model_params, tokenizer_name, vocab) = remap_model_yaml_config(yaml_config_path) config = FastSpeech2ConformerConfig(**model_params) model = FastSpeech2ConformerModel(config) espnet_checkpoint = torch.load(checkpoint_path) hf_compatible_state_dict = convert_espnet_state_dict_to_hf(espnet_checkpoint) model.load_state_dict(hf_compatible_state_dict) model.save_pretrained(pytorch_dump_folder_path) with TemporaryDirectory() as tempdir: vocab = {token: id for (id, token) in enumerate(vocab)} vocab_file = Path(tempdir) / 'vocab.json' with open(vocab_file, 'w') as f: json.dump(vocab, f) should_strip_spaces = 'no_space' in tokenizer_name tokenizer = FastSpeech2ConformerTokenizer(str(vocab_file), should_strip_spaces=should_strip_spaces) tokenizer.save_pretrained(pytorch_dump_folder_path) if repo_id: print('Pushing to the hub...') model.push_to_hub(repo_id) tokenizer.push_to_hub(repo_id) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--yaml_config_path', required=True, default=None, type=str, help='Path to config.yaml of model to convert') parser.add_argument('--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--push_to_hub', default=None, type=str, 
help='Where to upload the converted model on the 🤗 hub.') args = parser.parse_args() convert_FastSpeech2ConformerModel_checkpoint(args.checkpoint_path, args.yaml_config_path, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/fastspeech2_conformer/convert_hifigan.py """""" import argparse from pathlib import Path import torch import yaml from transformers import FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig, logging logging.set_verbosity_info() logger = logging.get_logger('transformers.models.FastSpeech2Conformer') def load_weights(checkpoint, hf_model, config): vocoder_key_prefix = 'tts.generator.vocoder.' checkpoint = {k.replace(vocoder_key_prefix, ''): v for (k, v) in checkpoint.items() if vocoder_key_prefix in k} hf_model.apply_weight_norm() hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g'] hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v'] hf_model.conv_pre.bias.data = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates)): hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g'] hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v'] hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias'] for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)): for j in range(len(config.resblock_dilation_sizes)): hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g'] hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v'] hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias'] hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g'] hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v'] hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias'] hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g'] hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v'] hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() def remap_hifigan_yaml_config(yaml_config_path): with Path(yaml_config_path).open('r', encoding='utf-8') as f: args = yaml.safe_load(f) args = argparse.Namespace(**args) vocoder_type = args.tts_conf['vocoder_type'] if vocoder_type != 'hifigan_generator': raise TypeError(f'Vocoder config must be for `hifigan_generator`, but got {vocoder_type}') remapped_dict = {} vocoder_params = args.tts_conf['vocoder_params'] key_mappings = {'channels': 'upsample_initial_channel', 'in_channels': 'model_in_dim', 'resblock_dilations': 'resblock_dilation_sizes', 'resblock_kernel_sizes': 'resblock_kernel_sizes', 'upsample_kernel_sizes': 'upsample_kernel_sizes', 'upsample_scales': 'upsample_rates'} for (espnet_config_key, hf_config_key) in key_mappings.items(): remapped_dict[hf_config_key] = vocoder_params[espnet_config_key] remapped_dict['sampling_rate'] = args.tts_conf['sampling_rate'] remapped_dict['normalize_before'] = False remapped_dict['leaky_relu_slope'] = vocoder_params['nonlinear_activation_params']['negative_slope'] return remapped_dict @torch.no_grad() def convert_hifigan_checkpoint(checkpoint_path, pytorch_dump_folder_path, yaml_config_path=None, repo_id=None): if yaml_config_path is not None: config_kwargs = remap_hifigan_yaml_config(yaml_config_path) config = FastSpeech2ConformerHifiGanConfig(**config_kwargs) else: 
config = FastSpeech2ConformerHifiGanConfig() model = FastSpeech2ConformerHifiGan(config) orig_checkpoint = torch.load(checkpoint_path) load_weights(orig_checkpoint, model, config) model.save_pretrained(pytorch_dump_folder_path) if repo_id: print('Pushing to the hub...') model.push_to_hub(repo_id) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--yaml_config_path', default=None, type=str, help='Path to config.yaml of model to convert') parser.add_argument('--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.') args = parser.parse_args() convert_hifigan_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.yaml_config_path, args.push_to_hub) # File: transformers-main/src/transformers/models/fastspeech2_conformer/convert_model_with_hifigan.py """""" import argparse import torch from transformers import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerModel, FastSpeech2ConformerWithHifiGan, FastSpeech2ConformerWithHifiGanConfig, logging from .convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch import convert_espnet_state_dict_to_hf, remap_model_yaml_config from .convert_hifigan import load_weights, remap_hifigan_yaml_config logging.set_verbosity_info() logger = logging.get_logger('transformers.models.FastSpeech2Conformer') def convert_FastSpeech2ConformerWithHifiGan_checkpoint(checkpoint_path, yaml_config_path, pytorch_dump_folder_path, repo_id=None): (model_params, *_) = remap_model_yaml_config(yaml_config_path) model_config = FastSpeech2ConformerConfig(**model_params) model = FastSpeech2ConformerModel(model_config) espnet_checkpoint = torch.load(checkpoint_path) hf_compatible_state_dict = convert_espnet_state_dict_to_hf(espnet_checkpoint) model.load_state_dict(hf_compatible_state_dict) config_kwargs = remap_hifigan_yaml_config(yaml_config_path) vocoder_config = FastSpeech2ConformerHifiGanConfig(**config_kwargs) vocoder = FastSpeech2ConformerHifiGan(vocoder_config) load_weights(espnet_checkpoint, vocoder, vocoder_config) config = FastSpeech2ConformerWithHifiGanConfig.from_sub_model_configs(model_config, vocoder_config) with_hifigan_model = FastSpeech2ConformerWithHifiGan(config) with_hifigan_model.model = model with_hifigan_model.vocoder = vocoder with_hifigan_model.save_pretrained(pytorch_dump_folder_path) if repo_id: print('Pushing to the hub...') with_hifigan_model.push_to_hub(repo_id) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--yaml_config_path', required=True, default=None, type=str, help='Path to config.yaml of model to convert') parser.add_argument('--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output `FastSpeech2ConformerModel` PyTorch model.') parser.add_argument('--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.') args = parser.parse_args() convert_FastSpeech2ConformerWithHifiGan_checkpoint(args.checkpoint_path, args.yaml_config_path, args.pytorch_dump_folder_path, args.push_to_hub) # File: 
transformers-main/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py """""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from torch import nn from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, add_start_docstrings, logging, replace_return_docstrings from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig logger = logging.get_logger(__name__) @dataclass class FastSpeech2ConformerModelOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None spectrogram: torch.FloatTensor = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None duration_outputs: torch.LongTensor = None pitch_outputs: torch.FloatTensor = None energy_outputs: torch.FloatTensor = None @dataclass class FastSpeech2ConformerWithHifiGanOutput(FastSpeech2ConformerModelOutput): waveform: torch.FloatTensor = None _CONFIG_FOR_DOC = 'FastSpeech2ConformerConfig' FASTSPEECH2_CONFORMER_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`FastSpeech2ConformerConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' HIFIGAN_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`FastSpeech2ConformerConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' FASTSPEECH2_CONFORMER_WITH_HIFIGAN_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`FastSpeech2ConformerWithHifiGanConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' def length_regulator(encoded_embeddings, duration_labels, speaking_speed=1.0): if speaking_speed <= 0: raise ValueError('`speaking_speed` must be greater than 0.') elif speaking_speed != 1.0: duration_labels = torch.round(duration_labels.float() * speaking_speed).long() if duration_labels.sum() == 0: duration_labels[duration_labels.sum(dim=1).eq(0)] = 1 max_len = torch.sum(duration_labels, dim=1).max() hidden_states = torch.zeros((encoded_embeddings.size(0), max_len, encoded_embeddings.size(2)), dtype=torch.float, device=encoded_embeddings.device) for (i, (encoded_embedding, target_duration)) in enumerate(zip(encoded_embeddings, duration_labels)): repeated = torch.repeat_interleave(encoded_embedding, target_duration, dim=0) hidden_states[i, :repeated.size(0)] = repeated return hidden_states class FastSpeech2ConformerDurationPredictor(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig): super().__init__() self.conv_layers = nn.ModuleList() self.log_domain_offset = 1.0 for layer_idx in range(config.duration_predictor_layers): num_chans = config.duration_predictor_channels input_channels = config.hidden_size if layer_idx == 0 else num_chans layer = FastSpeech2ConformerPredictorLayer(input_channels, num_chans, config.duration_predictor_kernel_size, config.duration_predictor_dropout_rate) self.conv_layers.append(layer) self.linear = nn.Linear(config.duration_predictor_channels, 1) def forward(self, encoder_hidden_states): hidden_states = encoder_hidden_states.transpose(1, -1) for layer in self.conv_layers: hidden_states = layer(hidden_states) hidden_states = self.linear(hidden_states.transpose(1, -1)).squeeze(-1) if not self.training: hidden_states = torch.clamp(torch.round(hidden_states.exp() - self.log_domain_offset), min=0).long() return hidden_states class FastSpeech2ConformerBatchNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() if layer_id == 0: in_conv_dim = config.num_mel_bins else: in_conv_dim = config.speech_decoder_postnet_units if layer_id == config.speech_decoder_postnet_layers - 1: out_conv_dim = config.num_mel_bins else: out_conv_dim = config.speech_decoder_postnet_units self.conv = nn.Conv1d(in_conv_dim, out_conv_dim, kernel_size=config.speech_decoder_postnet_kernel, stride=1, padding=(config.speech_decoder_postnet_kernel - 1) // 2, bias=False) self.batch_norm = nn.BatchNorm1d(out_conv_dim) if layer_id < config.speech_decoder_postnet_layers - 1: self.activation = nn.Tanh() else: self.activation = None self.dropout = nn.Dropout(config.speech_decoder_postnet_dropout) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.batch_norm(hidden_states) if self.activation is not None: hidden_states = self.activation(hidden_states) 
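# NOTE: descriptive comment (assumes the default postnet configuration: 5 layers, kernel size 5,
# 256 postnet units, 80 mel bins). The postnet passes activations channel-first, so this layer
# sees (batch, num_mel_bins, time) at layer 0 and (batch, speech_decoder_postnet_units, time)
# afterwards. Because the Conv1d above uses stride 1 with padding (kernel - 1) // 2, the time
# dimension is unchanged and only the channel count varies. The Tanh is applied to every layer
# except the last one, which projects back to num_mel_bins and stays linear so the residual
# correction added to the pre-postnet spectrogram is not squashed into (-1, 1).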
hidden_states = self.dropout(hidden_states) return hidden_states class FastSpeech2ConformerSpeechDecoderPostnet(nn.Module): def __init__(self, config): super().__init__() self.config = config self.feat_out = nn.Linear(config.hidden_size, config.num_mel_bins * config.reduction_factor) self.layers = nn.ModuleList([FastSpeech2ConformerBatchNormConvLayer(config, i) for i in range(config.speech_decoder_postnet_layers)]) def forward(self, hidden_states: torch.Tensor): outputs_before_postnet = self.feat_out(hidden_states).view(hidden_states.size(0), -1, self.config.num_mel_bins) layer_output = outputs_before_postnet.transpose(1, 2) for layer in self.layers: layer_output = layer(layer_output) outputs_after_postnet = outputs_before_postnet + layer_output.transpose(1, 2) return (outputs_before_postnet, outputs_after_postnet) class FastSpeech2ConformerPredictorLayer(nn.Module): def __init__(self, input_channels, num_chans, kernel_size, dropout_rate): super().__init__() self.conv = nn.Conv1d(input_channels, num_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2) self.activation = nn.ReLU() self.layer_norm = nn.LayerNorm(num_chans) self.dropout = nn.Dropout(dropout_rate) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(1, -1) hidden_states = self.dropout(hidden_states) return hidden_states class FastSpeech2ConformerVariancePredictor(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig, num_layers=2, num_chans=384, kernel_size=3, dropout_rate=0.5): super().__init__() self.conv_layers = nn.ModuleList() for idx in range(num_layers): input_channels = config.hidden_size if idx == 0 else num_chans layer = FastSpeech2ConformerPredictorLayer(input_channels, num_chans, kernel_size, dropout_rate) self.conv_layers.append(layer) self.linear = nn.Linear(num_chans, 1) def forward(self, encoder_hidden_states, padding_masks=None): hidden_states = encoder_hidden_states.transpose(1, -1) for layer in self.conv_layers: hidden_states = layer(hidden_states) hidden_states = self.linear(hidden_states.transpose(1, 2)) if padding_masks is not None: hidden_states = hidden_states.masked_fill(padding_masks, 0.0) return hidden_states class FastSpeech2ConformerVarianceEmbedding(nn.Module): def __init__(self, in_channels=1, out_channels=384, kernel_size=1, padding=0, dropout_rate=0.0): super().__init__() self.conv = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding) self.dropout = nn.Dropout(dropout_rate) def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states class FastSpeech2ConformerAttention(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig, module_config): super().__init__() self.num_heads = module_config['num_attention_heads'] self.hidden_size = config.hidden_size self.dim_key = self.hidden_size // self.num_heads self.head_dim = self.hidden_size // self.num_heads self.linear_q = nn.Linear(self.hidden_size, self.hidden_size) self.linear_k = nn.Linear(self.hidden_size, self.hidden_size) self.linear_v = nn.Linear(self.hidden_size, self.hidden_size) self.linear_out = nn.Linear(self.hidden_size, self.hidden_size) self.dropout = 
nn.Dropout(p=module_config['attention_dropout_rate']) self.linear_pos = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.pos_bias_u = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim)) self.pos_bias_v = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim)) def shift_relative_position_tensor(self, pos_tensor): zero_pad = torch.zeros((*pos_tensor.size()[:3], 1), device=pos_tensor.device, dtype=pos_tensor.dtype) pos_tensor_padded = torch.cat([zero_pad, pos_tensor], dim=-1) pos_tensor_padded = pos_tensor_padded.view(*pos_tensor.size()[:2], pos_tensor.size(3) + 1, pos_tensor.size(2)) pos_tensor = pos_tensor_padded[:, :, 1:].view_as(pos_tensor)[:, :, :, :pos_tensor.size(-1) // 2 + 1] return pos_tensor def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, pos_emb: Optional[torch.Tensor]=None, output_attentions: Optional[torch.Tensor]=False) -> Tuple[torch.Tensor, torch.Tensor]: (bsz, q_len, _) = hidden_states.size() query_states = self.linear_q(hidden_states).view(bsz, -1, self.num_heads, self.head_dim) key_states = self.linear_k(hidden_states).view(bsz, -1, self.num_heads, self.head_dim) value_states = self.linear_v(hidden_states).view(bsz, -1, self.num_heads, self.head_dim) bsz_pos = pos_emb.size(0) pos_encoding = self.linear_pos(pos_emb).view(bsz_pos, -1, self.num_heads, self.head_dim) query_with_bias_u = (query_states + self.pos_bias_u).transpose(1, 2) query_with_bias_v = (query_states + self.pos_bias_v).transpose(1, 2) matrix_ac = torch.matmul(query_with_bias_u, key_states.permute(0, 2, 3, 1)) matrix_bd = torch.matmul(query_with_bias_v, pos_encoding.permute(0, 2, 3, 1)) matrix_bd = self.shift_relative_position_tensor(matrix_bd) scores = (matrix_ac + matrix_bd) / math.sqrt(self.dim_key) if attention_mask is not None: expected_size = (bsz, 1, q_len) if attention_mask.size() != expected_size: raise ValueError(f'Attention mask should be of size {expected_size}, but is {attention_mask.size()}') attention_mask = attention_mask.unsqueeze(1).eq(0) min_value = float(torch.finfo(scores.dtype).min) scores = scores.masked_fill(attention_mask, min_value) attn_weights = torch.softmax(scores, dim=-1).masked_fill(attention_mask, 0.0) else: attn_weights = torch.softmax(scores, dim=-1) attn_weights = self.dropout(attn_weights) attn_output = torch.matmul(attn_weights, value_states.transpose(1, 2)) attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, q_len, -1) attn_output = self.linear_out(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights) class FastSpeech2ConformerConvolutionModule(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig, module_config): super().__init__() channels = config.hidden_size kernel_size = module_config['kernel_size'] self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=True) self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias=True) self.norm = nn.BatchNorm1d(channels) self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=True) def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.pointwise_conv1(hidden_states) hidden_states = nn.functional.glu(hidden_states, dim=1) hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.norm(hidden_states) hidden_states = hidden_states * torch.sigmoid(hidden_states) hidden_states = 
self.pointwise_conv2(hidden_states) return hidden_states.transpose(1, 2) class FastSpeech2ConformerEncoderLayer(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig, module_config): super().__init__() self.self_attn = FastSpeech2ConformerAttention(config, module_config) self.feed_forward = FastSpeech2ConformerMultiLayeredConv1d(config, module_config) self.macaron_style = config.use_macaron_style_in_conformer if self.macaron_style: self.feed_forward_macaron = FastSpeech2ConformerMultiLayeredConv1d(config, module_config) self.ff_macaron_layer_norm = nn.LayerNorm(config.hidden_size) self.ff_scale = 0.5 else: self.ff_scale = 1.0 self.use_cnn_module = config.use_cnn_in_conformer if self.use_cnn_module: self.conv_module = FastSpeech2ConformerConvolutionModule(config, module_config) self.conv_layer_norm = nn.LayerNorm(config.hidden_size) self.final_layer_norm = nn.LayerNorm(config.hidden_size) self.ff_layer_norm = nn.LayerNorm(config.hidden_size) self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size) self.dropout = nn.Dropout(module_config['dropout_rate']) self.size = config.hidden_size self.normalize_before = module_config['normalize_before'] self.concat_after = module_config['concat_after'] if self.concat_after: self.concat_linear = nn.Linear(config.hidden_size + config.hidden_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor, pos_emb: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[torch.Tensor]=False): if self.macaron_style: residual = hidden_states if self.normalize_before: hidden_states = self.ff_macaron_layer_norm(hidden_states) hidden_states = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(hidden_states)) if not self.normalize_before: hidden_states = self.ff_macaron_layer_norm(hidden_states) residual = hidden_states if self.normalize_before: hidden_states = self.self_attn_layer_norm(hidden_states) (attention_output, attention_scores) = self.self_attn(hidden_states, attention_mask=attention_mask, pos_emb=pos_emb, output_attentions=output_attentions) if self.concat_after: x_concat = torch.cat((hidden_states, attention_output), dim=-1) hidden_states = self.concat_linear(x_concat) hidden_states = residual + hidden_states else: hidden_states = self.dropout(attention_output) hidden_states = residual + hidden_states if not self.normalize_before: hidden_states = self.self_attn_layer_norm(hidden_states) if self.use_cnn_module: residual = hidden_states if self.normalize_before: hidden_states = self.conv_layer_norm(hidden_states) hidden_states = self.conv_module(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = residual + hidden_states if not self.normalize_before: hidden_states = self.conv_layer_norm(hidden_states) residual = hidden_states if self.normalize_before: hidden_states = self.ff_layer_norm(hidden_states) hidden_states = self.feed_forward(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = residual + self.ff_scale * hidden_states if not self.normalize_before: hidden_states = self.ff_layer_norm(hidden_states) if self.conv_module is not None: hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attention_scores,) return outputs class FastSpeech2ConformerMultiLayeredConv1d(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig, module_config): super().__init__() input_channels = config.hidden_size hidden_channels = module_config['linear_units'] kernel_size = 
config.positionwise_conv_kernel_size self.conv1 = nn.Conv1d(input_channels, hidden_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2) self.conv2 = nn.Conv1d(hidden_channels, input_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2) self.dropout = nn.Dropout(module_config['dropout_rate']) def forward(self, hidden_states): hidden_states = hidden_states.transpose(-1, 1) hidden_states = self.conv1(hidden_states) hidden_states = torch.relu(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = hidden_states.transpose(-1, 1) return hidden_states class FastSpeech2ConformerRelPositionalEncoding(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig, module_config): super().__init__() self.embed_dim = config.hidden_size self.input_scale = math.sqrt(self.embed_dim) self.dropout = nn.Dropout(p=module_config['positional_dropout_rate']) self.pos_enc = None self.max_len = 5000 self.extend_pos_enc(torch.tensor(0.0).expand(1, self.max_len)) def extend_pos_enc(self, x): if self.pos_enc is not None: if self.pos_enc.size(1) >= x.size(1) * 2 - 1: if self.pos_enc.dtype != x.dtype or self.pos_enc.device != x.device: self.pos_enc = self.pos_enc.to(dtype=x.dtype, device=x.device) return pos_enc_positive = torch.zeros(x.size(1), self.embed_dim) pos_enc_negative = torch.zeros(x.size(1), self.embed_dim) position = torch.arange(0, x.size(1), dtype=torch.int64).float().unsqueeze(1) div_term = torch.exp(torch.arange(0, self.embed_dim, 2, dtype=torch.int64).float() * -(math.log(10000.0) / self.embed_dim)) pos_enc_positive[:, 0::2] = torch.sin(position * div_term) pos_enc_positive[:, 1::2] = torch.cos(position * div_term) pos_enc_negative[:, 0::2] = torch.sin(-1 * position * div_term) pos_enc_negative[:, 1::2] = torch.cos(-1 * position * div_term) pos_enc_positive = torch.flip(pos_enc_positive, [0]).unsqueeze(0) pos_enc_negative = pos_enc_negative[1:].unsqueeze(0) pos_enc = torch.cat([pos_enc_positive, pos_enc_negative], dim=1) self.pos_enc = pos_enc.to(device=x.device, dtype=x.dtype) def forward(self, feature_representation): self.extend_pos_enc(feature_representation) hidden_states = feature_representation * self.input_scale center_idx = self.pos_enc.size(1) // 2 pos_emb = self.pos_enc[:, center_idx - hidden_states.size(1) + 1:center_idx + hidden_states.size(1)] return (self.dropout(hidden_states), self.dropout(pos_emb)) class FastSpeech2ConformerEncoder(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig, module_config, use_encoder_input_layer=False): super().__init__() self.embed = None if use_encoder_input_layer: self.embed = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, padding_idx=0) self.pos_enc = FastSpeech2ConformerRelPositionalEncoding(config, module_config) self.conformer_layers = nn.ModuleList([FastSpeech2ConformerEncoderLayer(config, module_config) for _ in range(module_config['layers'])]) def forward(self, input_tensor: torch.LongTensor, attention_mask: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=False, return_dict: Optional[bool]=None): feature_representation = input_tensor if self.embed is not None: feature_representation = self.embed(feature_representation) (hidden_states, pos_emb) = self.pos_enc(feature_representation) all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for conformer_layer in self.conformer_layers: if output_hidden_states: 
all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = conformer_layer(hidden_states, pos_emb, attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class FastSpeech2ConformerLoss(nn.Module): def __init__(self, config: FastSpeech2ConformerConfig): super().__init__() use_masking = config.use_masking use_weighted_masking = config.use_weighted_masking if use_masking and use_weighted_masking: raise ValueError('Either use_masking or use_weighted_masking can be True, but not both.') self.use_masking = use_masking self.use_weighted_masking = use_weighted_masking reduction = 'none' if self.use_weighted_masking else 'mean' self.l1_criterion = nn.L1Loss(reduction=reduction) self.mse_criterion = nn.MSELoss(reduction=reduction) self.duration_criterion = nn.MSELoss(reduction=reduction) self.log_domain_offset = 1.0 def forward(self, outputs_after_postnet, outputs_before_postnet, duration_outputs, pitch_outputs, energy_outputs, spectrogram_labels, duration_labels, pitch_labels, energy_labels, duration_mask, spectrogram_mask): pitch_and_energy_masks = duration_mask.unsqueeze(-1) if self.use_masking: outputs_before_postnet = outputs_before_postnet.masked_select(spectrogram_mask) if outputs_after_postnet is not None: outputs_after_postnet = outputs_after_postnet.masked_select(spectrogram_mask) spectrogram_labels = spectrogram_labels.masked_select(spectrogram_mask) duration_outputs = duration_outputs.masked_select(duration_mask) duration_labels = duration_labels.masked_select(duration_mask) pitch_outputs = pitch_outputs.masked_select(pitch_and_energy_masks) energy_outputs = energy_outputs.masked_select(pitch_and_energy_masks) pitch_labels = pitch_labels.masked_select(pitch_and_energy_masks) energy_labels = energy_labels.masked_select(pitch_and_energy_masks) l1_loss = self.l1_criterion(outputs_before_postnet, spectrogram_labels) if outputs_after_postnet is not None: l1_loss = l1_loss + self.l1_criterion(outputs_after_postnet, spectrogram_labels) duration_labels = torch.log(duration_labels.float() + self.log_domain_offset) duration_loss = self.duration_criterion(duration_outputs, duration_labels) pitch_loss = self.mse_criterion(pitch_outputs, pitch_labels) energy_loss = self.mse_criterion(energy_outputs, energy_labels) if self.use_weighted_masking: spectrogram_mask = nn.functional.pad(spectrogram_mask.transpose(1, 2), [0, spectrogram_labels.size(1) - spectrogram_mask.size(1), 0, 0, 0, 0], value=False).transpose(1, 2) out_weights = spectrogram_mask.float() / spectrogram_mask.sum(dim=1, keepdim=True).float() out_weights /= spectrogram_labels.size(0) * spectrogram_labels.size(2) duration_weights = duration_mask.float() / duration_mask.sum(dim=1, keepdim=True).float() duration_weights /= duration_labels.size(0) l1_loss = l1_loss.mul(out_weights).masked_select(spectrogram_mask).sum() duration_loss = duration_loss.mul(duration_weights).masked_select(duration_mask).sum() pitch_weights = duration_weights.unsqueeze(-1) pitch_loss = pitch_loss.mul(pitch_weights).masked_select(pitch_and_energy_masks).sum() energy_loss = 
energy_loss.mul(pitch_weights).masked_select(pitch_and_energy_masks).sum() return l1_loss + duration_loss + pitch_loss + energy_loss class FastSpeech2ConformerPreTrainedModel(PreTrainedModel): config_class = FastSpeech2ConformerConfig base_model_prefix = 'fastspeech2_conformer' main_input_name = 'input_ids' def _init_weights(self, module): if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: key = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-key, b=key) elif isinstance(module, nn.Embedding): module.weight.data.normal_() if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, FastSpeech2ConformerAttention): nn.init.xavier_uniform_(module.pos_bias_u) nn.init.xavier_uniform_(module.pos_bias_v) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, FastSpeech2ConformerEncoder): module.gradient_checkpointing = value @add_start_docstrings('FastSpeech2Conformer Model.', FASTSPEECH2_CONFORMER_START_DOCSTRING) class FastSpeech2ConformerModel(FastSpeech2ConformerPreTrainedModel): def __init__(self, config: FastSpeech2ConformerConfig): super().__init__(config) self.config = config self.vocab_size = config.vocab_size self.num_mel_bins = config.num_mel_bins self.hidden_size = config.hidden_size self.reduction_factor = config.reduction_factor self.stop_gradient_from_pitch_predictor = config.stop_gradient_from_pitch_predictor self.stop_gradient_from_energy_predictor = config.stop_gradient_from_energy_predictor self.multilingual_model = config.num_languages is not None and config.num_languages > 1 if self.multilingual_model: self.language_id_embedding = torch.nn.Embedding(config.num_languages, self.hidden_size) self.multispeaker_model = config.num_speakers is not None and config.num_speakers > 1 if self.multispeaker_model: self.speaker_id_embedding = torch.nn.Embedding(config.num_speakers, config.hidden_size) self.speaker_embed_dim = config.speaker_embed_dim if self.speaker_embed_dim: self.projection = nn.Linear(config.hidden_size + self.speaker_embed_dim, config.hidden_size) self.encoder = FastSpeech2ConformerEncoder(config, config.encoder_config, use_encoder_input_layer=True) self.duration_predictor = FastSpeech2ConformerDurationPredictor(config) self.pitch_predictor = FastSpeech2ConformerVariancePredictor(config, num_layers=config.pitch_predictor_layers, num_chans=config.pitch_predictor_channels, kernel_size=config.pitch_predictor_kernel_size, dropout_rate=config.pitch_predictor_dropout) self.pitch_embed = FastSpeech2ConformerVarianceEmbedding(out_channels=self.hidden_size, kernel_size=config.pitch_embed_kernel_size, padding=(config.pitch_embed_kernel_size - 1) // 2, dropout_rate=config.pitch_embed_dropout) self.energy_predictor = FastSpeech2ConformerVariancePredictor(config, num_layers=config.energy_predictor_layers, num_chans=config.energy_predictor_channels, kernel_size=config.energy_predictor_kernel_size, dropout_rate=config.energy_predictor_dropout) self.energy_embed = FastSpeech2ConformerVarianceEmbedding(out_channels=self.hidden_size, kernel_size=config.energy_embed_kernel_size, padding=(config.energy_embed_kernel_size - 1) // 2, dropout_rate=config.energy_embed_dropout) self.decoder = FastSpeech2ConformerEncoder(config, config.decoder_config, use_encoder_input_layer=False) self.speech_decoder_postnet = 
FastSpeech2ConformerSpeechDecoderPostnet(config) self.criterion = FastSpeech2ConformerLoss(config) self.post_init() @replace_return_docstrings(output_type=FastSpeech2ConformerModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor]=None, spectrogram_labels: Optional[torch.FloatTensor]=None, duration_labels: Optional[torch.LongTensor]=None, pitch_labels: Optional[torch.FloatTensor]=None, energy_labels: Optional[torch.FloatTensor]=None, speaker_ids: Optional[torch.LongTensor]=None, lang_ids: Optional[torch.LongTensor]=None, speaker_embedding: Optional[torch.FloatTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> Union[Tuple, FastSpeech2ConformerModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states if attention_mask is None: attention_mask = torch.ones(input_ids.shape, device=input_ids.device) has_missing_labels = spectrogram_labels is None or duration_labels is None or pitch_labels is None or (energy_labels is None) if self.training and has_missing_labels: raise ValueError('All labels must be provided to run in training mode.') text_masks = attention_mask.unsqueeze(-2) encoder_outputs = self.encoder(input_ids, text_masks, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict) hidden_states = encoder_outputs[0] if self.multispeaker_model and speaker_ids is not None: speaker_id_embeddings = self.speaker_id_embedding(speaker_ids.view(-1)) hidden_states = hidden_states + speaker_id_embeddings.unsqueeze(1) if self.multilingual_model and lang_ids is not None: language_id_embbedings = self.language_id_embedding(lang_ids.view(-1)) hidden_states = hidden_states + language_id_embbedings.unsqueeze(1) if self.speaker_embed_dim is not None and speaker_embedding is not None: embeddings_expanded = nn.functional.normalize(speaker_embedding).unsqueeze(1).expand(-1, hidden_states.size(1), -1) hidden_states = self.projection(torch.cat([hidden_states, embeddings_expanded], dim=-1)) duration_mask = ~attention_mask.bool() if self.stop_gradient_from_pitch_predictor: pitch_predictions = self.pitch_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1)) else: pitch_predictions = self.pitch_predictor(hidden_states, duration_mask.unsqueeze(-1)) if self.stop_gradient_from_energy_predictor: energy_predictions = self.energy_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1)) else: energy_predictions = self.energy_predictor(hidden_states, duration_mask.unsqueeze(-1)) duration_predictions = self.duration_predictor(hidden_states) duration_predictions = duration_predictions.masked_fill(duration_mask, 0.0) if not self.training: embedded_pitch_curve = self.pitch_embed(pitch_predictions) embedded_energy_curve = self.energy_embed(energy_predictions) hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve hidden_states = length_regulator(hidden_states, duration_predictions, self.config.speaking_speed) else: embedded_pitch_curve = self.pitch_embed(pitch_labels) embedded_energy_curve = self.energy_embed(energy_labels) hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve hidden_states = 
length_regulator(hidden_states, duration_labels) if not self.training: hidden_mask = None else: spectrogram_mask = (spectrogram_labels != -100).any(dim=-1) spectrogram_mask = spectrogram_mask.int() if self.reduction_factor > 1: length_dim = spectrogram_mask.shape[1] - spectrogram_mask.shape[1] % self.reduction_factor spectrogram_mask = spectrogram_mask[:, :, :length_dim] hidden_mask = spectrogram_mask.unsqueeze(-2) decoder_outputs = self.decoder(hidden_states, hidden_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict) (outputs_before_postnet, outputs_after_postnet) = self.speech_decoder_postnet(decoder_outputs[0]) loss = None if self.training: loss_duration_mask = ~duration_mask loss_spectrogram_mask = spectrogram_mask.unsqueeze(-1).bool() loss = self.criterion(outputs_after_postnet=outputs_after_postnet, outputs_before_postnet=outputs_before_postnet, duration_outputs=duration_predictions, pitch_outputs=pitch_predictions, energy_outputs=energy_predictions, spectrogram_labels=spectrogram_labels, duration_labels=duration_labels, pitch_labels=pitch_labels, energy_labels=energy_labels, duration_mask=loss_duration_mask, spectrogram_mask=loss_spectrogram_mask) if not return_dict: postnet_outputs = (outputs_after_postnet,) audio_feature_predictions = (duration_predictions, pitch_predictions, energy_predictions) outputs = postnet_outputs + encoder_outputs + decoder_outputs[1:] + audio_feature_predictions return (loss,) + outputs if loss is not None else outputs return FastSpeech2ConformerModelOutput(loss=loss, spectrogram=outputs_after_postnet, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, duration_outputs=duration_predictions, pitch_outputs=pitch_predictions, energy_outputs=energy_predictions) class HifiGanResidualBlock(nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1): super().__init__() self.leaky_relu_slope = leaky_relu_slope self.convs1 = nn.ModuleList([nn.Conv1d(channels, channels, kernel_size, stride=1, dilation=dilation[i], padding=self.get_padding(kernel_size, dilation[i])) for i in range(len(dilation))]) self.convs2 = nn.ModuleList([nn.Conv1d(channels, channels, kernel_size, stride=1, dilation=1, padding=self.get_padding(kernel_size, 1)) for _ in range(len(dilation))]) def get_padding(self, kernel_size, dilation=1): return (kernel_size * dilation - dilation) // 2 def apply_weight_norm(self): for layer in self.convs1: nn.utils.weight_norm(layer) for layer in self.convs2: nn.utils.weight_norm(layer) def remove_weight_norm(self): for layer in self.convs1: nn.utils.remove_weight_norm(layer) for layer in self.convs2: nn.utils.remove_weight_norm(layer) def forward(self, hidden_states): for (conv1, conv2) in zip(self.convs1, self.convs2): residual = hidden_states hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = conv1(hidden_states) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = conv2(hidden_states) hidden_states = hidden_states + residual return hidden_states @add_start_docstrings('HiFi-GAN vocoder.', HIFIGAN_START_DOCSTRING) class FastSpeech2ConformerHifiGan(PreTrainedModel): config_class = FastSpeech2ConformerHifiGanConfig main_input_name = 'spectrogram' def __init__(self, config: 
FastSpeech2ConformerHifiGanConfig): super().__init__(config) self.num_kernels = len(config.resblock_kernel_sizes) self.num_upsamples = len(config.upsample_rates) self.conv_pre = nn.Conv1d(config.model_in_dim, config.upsample_initial_channel, kernel_size=7, stride=1, padding=3) self.upsampler = nn.ModuleList() for (i, (upsample_rate, kernel_size)) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)): self.upsampler.append(nn.ConvTranspose1d(config.upsample_initial_channel // 2 ** i, config.upsample_initial_channel // 2 ** (i + 1), kernel_size=kernel_size, stride=upsample_rate, padding=(kernel_size - upsample_rate) // 2)) self.resblocks = nn.ModuleList() for i in range(len(self.upsampler)): channels = config.upsample_initial_channel // 2 ** (i + 1) for (kernel_size, dilation) in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes): self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope)) self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3) self.register_buffer('mean', torch.zeros(config.model_in_dim)) self.register_buffer('scale', torch.ones(config.model_in_dim)) self.post_init() def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() def apply_weight_norm(self): nn.utils.weight_norm(self.conv_pre) for layer in self.upsampler: nn.utils.weight_norm(layer) for layer in self.resblocks: layer.apply_weight_norm() nn.utils.weight_norm(self.conv_post) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv_pre) for layer in self.upsampler: nn.utils.remove_weight_norm(layer) for layer in self.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.conv_post) def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor: if self.config.normalize_before: spectrogram = (spectrogram - self.mean) / self.scale is_batched = spectrogram.dim() == 3 if not is_batched: spectrogram = spectrogram.unsqueeze(0) hidden_states = spectrogram.transpose(2, 1) hidden_states = self.conv_pre(hidden_states) for i in range(self.num_upsamples): hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope) hidden_states = self.upsampler[i](hidden_states) res_state = self.resblocks[i * self.num_kernels](hidden_states) for j in range(1, self.num_kernels): res_state += self.resblocks[i * self.num_kernels + j](hidden_states) hidden_states = res_state / self.num_kernels hidden_states = nn.functional.leaky_relu(hidden_states) hidden_states = self.conv_post(hidden_states) hidden_states = torch.tanh(hidden_states) if not is_batched: waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1) else: waveform = hidden_states.squeeze(1) return waveform @add_start_docstrings('The FastSpeech2ConformerModel with a FastSpeech2ConformerHifiGan vocoder head that performs text-to-speech (waveform).', FASTSPEECH2_CONFORMER_WITH_HIFIGAN_START_DOCSTRING) class FastSpeech2ConformerWithHifiGan(PreTrainedModel): config_class = FastSpeech2ConformerWithHifiGanConfig def __init__(self, config: FastSpeech2ConformerWithHifiGanConfig): super().__init__(config) self.model = FastSpeech2ConformerModel(config.model_config) self.vocoder = FastSpeech2ConformerHifiGan(config.vocoder_config) self.config = config @replace_return_docstrings(output_type=FastSpeech2ConformerWithHifiGanOutput, config_class=FastSpeech2ConformerWithHifiGanConfig) def forward(self, 
input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor]=None, spectrogram_labels: Optional[torch.FloatTensor]=None, duration_labels: Optional[torch.LongTensor]=None, pitch_labels: Optional[torch.FloatTensor]=None, energy_labels: Optional[torch.FloatTensor]=None, speaker_ids: Optional[torch.LongTensor]=None, lang_ids: Optional[torch.LongTensor]=None, speaker_embedding: Optional[torch.FloatTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> Union[Tuple, FastSpeech2ConformerModelOutput]: return_dict = return_dict if return_dict is not None else self.config.model_config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.model_config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.model_config.output_hidden_states model_outputs = self.model(input_ids, attention_mask, spectrogram_labels=spectrogram_labels, duration_labels=duration_labels, pitch_labels=pitch_labels, energy_labels=energy_labels, speaker_ids=speaker_ids, lang_ids=lang_ids, speaker_embedding=speaker_embedding, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states) if not return_dict: has_missing_labels = spectrogram_labels is None or duration_labels is None or pitch_labels is None or (energy_labels is None) if has_missing_labels: spectrogram = model_outputs[0] else: spectrogram = model_outputs[1] else: spectrogram = model_outputs['spectrogram'] waveform = self.vocoder(spectrogram) if not return_dict: return model_outputs + (waveform,) return FastSpeech2ConformerWithHifiGanOutput(waveform=waveform, **model_outputs) # File: transformers-main/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py """""" import json import os from typing import Optional, Tuple import regex from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging, requires_backends logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'} class FastSpeech2ConformerTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, bos_token='', eos_token='', pad_token='', unk_token='', should_strip_spaces=False, **kwargs): requires_backends(self, 'g2p_en') with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) import g2p_en self.g2p = g2p_en.G2p() self.decoder = {v: k for (k, v) in self.encoder.items()} super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, should_strip_spaces=should_strip_spaces, **kwargs) self.should_strip_spaces = should_strip_spaces @property def vocab_size(self): return len(self.decoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): text = regex.sub(';', ',', text) text = regex.sub(':', ',', text) text = regex.sub('-', ' ', text) text = regex.sub('&', 'and', text) text = regex.sub('[\\(\\)\\[\\]\\<\\>\\"]+', '', text) text = regex.sub('\\s+', ' ', text) text = text.upper() return (text, kwargs) def _tokenize(self, text): tokens = self.g2p(text) if self.should_strip_spaces: tokens = list(filter(lambda s: s != ' ', tokens)) tokens.append(self.eos_token) return tokens def _convert_token_to_id(self, token): return 
self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index, self.unk_token) def decode(self, token_ids, **kwargs): logger.warning('Phonemes cannot be reliably converted to a string due to the one-many mapping, converting to tokens instead.') return self.convert_ids_to_tokens(token_ids) def convert_tokens_to_string(self, tokens, **kwargs): logger.warning('Phonemes cannot be reliably converted to a string due to the one-many mapping, returning the tokens.') return tokens def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.get_vocab(), ensure_ascii=False)) return (vocab_file,) def __getstate__(self): state = self.__dict__.copy() state['g2p'] = None return state def __setstate__(self, d): self.__dict__ = d try: import g2p_en self.g2p = g2p_en.G2p() except ImportError: raise ImportError('You need to install g2p-en to use FastSpeech2ConformerTokenizer. See https://pypi.org/project/g2p-en/ for installation.') # File: transformers-main/src/transformers/models/flaubert/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _import_structure = {'configuration_flaubert': ['FlaubertConfig', 'FlaubertOnnxConfig'], 'tokenization_flaubert': ['FlaubertTokenizer']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flaubert'] = ['FlaubertForMultipleChoice', 'FlaubertForQuestionAnswering', 'FlaubertForQuestionAnsweringSimple', 'FlaubertForSequenceClassification', 'FlaubertForTokenClassification', 'FlaubertModel', 'FlaubertWithLMHeadModel', 'FlaubertPreTrainedModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_flaubert'] = ['TFFlaubertForMultipleChoice', 'TFFlaubertForQuestionAnsweringSimple', 'TFFlaubertForSequenceClassification', 'TFFlaubertForTokenClassification', 'TFFlaubertModel', 'TFFlaubertPreTrainedModel', 'TFFlaubertWithLMHeadModel'] if TYPE_CHECKING: from .configuration_flaubert import FlaubertConfig, FlaubertOnnxConfig from .tokenization_flaubert import FlaubertTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flaubert import FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertPreTrainedModel, FlaubertWithLMHeadModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_flaubert import TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertPreTrainedModel, TFFlaubertWithLMHeadModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: 
transformers-main/src/transformers/models/flaubert/configuration_flaubert.py """""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class FlaubertConfig(PretrainedConfig): model_type = 'flaubert' attribute_map = {'hidden_size': 'emb_dim', 'num_attention_heads': 'n_heads', 'num_hidden_layers': 'n_layers', 'n_words': 'vocab_size'} def __init__(self, pre_norm=False, layerdrop=0.0, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048 ** (-0.5), layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type='first', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs): self.pre_norm = pre_norm self.layerdrop = layerdrop self.vocab_size = vocab_size self.emb_dim = emb_dim self.n_layers = n_layers self.n_heads = n_heads self.dropout = dropout self.attention_dropout = attention_dropout self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.use_lang_emb = use_lang_emb self.layer_norm_eps = layer_norm_eps self.bos_index = bos_index self.eos_index = eos_index self.pad_index = pad_index self.unk_index = unk_index self.mask_index = mask_index self.is_encoder = is_encoder self.max_position_embeddings = max_position_embeddings self.embed_init_std = embed_init_std self.init_std = init_std self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_proj_to_labels = summary_proj_to_labels self.summary_first_dropout = summary_first_dropout self.start_n_top = start_n_top self.end_n_top = end_n_top self.mask_token_id = mask_token_id self.lang_id = lang_id if 'n_words' in kwargs: self.n_words = kwargs['n_words'] super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs) class FlaubertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)]) # File: transformers-main/src/transformers/models/flaubert/modeling_flaubert.py """""" import itertools import math from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import gelu from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel, SequenceSummary, SQuADHead from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from 
.configuration_flaubert import FlaubertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'flaubert/flaubert_base_cased' _CONFIG_FOR_DOC = 'FlaubertConfig' def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out.requires_grad = False out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() def get_masks(slen, lengths, causal, padding_mask=None): alen = torch.arange(slen, dtype=torch.long, device=lengths.device) if padding_mask is not None: mask = padding_mask else: assert lengths.max().item() <= slen mask = alen < lengths[:, None] bs = lengths.size(0) if causal: attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None] else: attn_mask = mask assert mask.size() == (bs, slen) assert causal is False or attn_mask.size() == (bs, slen, slen) return (mask, attn_mask) class MultiHeadAttention(nn.Module): NEW_ID = itertools.count() def __init__(self, n_heads, dim, config): super().__init__() self.layer_id = next(MultiHeadAttention.NEW_ID) self.dim = dim self.n_heads = n_heads self.dropout = config.attention_dropout assert self.dim % self.n_heads == 0 self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) self.out_lin = nn.Linear(dim, dim) self.pruned_heads = set() def prune_heads(self, heads): attention_head_size = self.dim // self.n_heads if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads) self.q_lin = prune_linear_layer(self.q_lin, index) self.k_lin = prune_linear_layer(self.k_lin, index) self.v_lin = prune_linear_layer(self.v_lin, index) self.out_lin = prune_linear_layer(self.out_lin, index, dim=1) self.n_heads = self.n_heads - len(heads) self.dim = attention_head_size * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False): (bs, qlen, dim) = input.size() if kv is None: klen = qlen if cache is None else cache['slen'] + qlen else: klen = kv.size(1) n_heads = self.n_heads dim_per_head = self.dim // n_heads mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen) def shape(x): return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2) def unshape(x): return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head) q = shape(self.q_lin(input)) if kv is None: k = shape(self.k_lin(input)) v = shape(self.v_lin(input)) elif cache is None or self.layer_id not in cache: k = v = kv k = shape(self.k_lin(k)) v = shape(self.v_lin(v)) if cache is not None: if self.layer_id in cache: if kv is None: (k_, v_) = cache[self.layer_id] k = torch.cat([k_, k], dim=2) v = torch.cat([v_, v], dim=2) else: (k, v) = cache[self.layer_id] cache[self.layer_id] = (k, v) q = q / math.sqrt(dim_per_head) scores = torch.matmul(q, k.transpose(2, 3)) mask = (mask == 0).view(mask_reshape).expand_as(scores) scores.masked_fill_(mask, torch.finfo(scores.dtype).min) weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) weights = nn.functional.dropout(weights, p=self.dropout, training=self.training) if head_mask is not None: weights = weights * head_mask context = torch.matmul(weights, v) context = unshape(context) outputs = (self.out_lin(context),) if output_attentions: outputs = outputs + (weights,) return outputs class 
TransformerFFN(nn.Module): def __init__(self, in_dim, dim_hidden, out_dim, config): super().__init__() self.dropout = config.dropout self.lin1 = nn.Linear(in_dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, out_dim) self.act = gelu if config.gelu_activation else nn.functional.relu self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 def forward(self, input): return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input) def ff_chunk(self, input): x = self.lin1(input) x = self.act(x) x = self.lin2(x) x = nn.functional.dropout(x, p=self.dropout, training=self.training) return x FLAUBERT_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`FlaubertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' FLAUBERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Length of each sentence that can be used to avoid performing attention on padding token indices. You can\n also use `attention_mask` for the same result (see above), kept here for compatibility. Indices selected in\n `[0, ..., input_ids.size(-1)]`:\n cache (`Dict[str, torch.FloatTensor]`, *optional*):\n Dictionary strings to `torch.FloatTensor` that contains precomputed hidden-states (key and values in the\n attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential\n decoding. 
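As a small, self-contained sketch of how the `lengths` and `attention_mask` arguments described above relate (the token ids are made up; 2 is the default `pad_index` in `FlaubertConfig`):

import torch

# `lengths` and `attention_mask` are two ways to mark the same padding positions.
input_ids = torch.tensor([[7, 6, 5, 2, 2], [7, 6, 5, 4, 3]])  # assume 2 is the pad index
lengths = torch.tensor([3, 5])                                # true length of each sequence
attention_mask = (torch.arange(input_ids.size(1))[None, :] < lengths[:, None]).long()
# attention_mask == tensor([[1, 1, 1, 0, 0],
#                           [1, 1, 1, 1, 1]])
# Either `attention_mask=attention_mask` or `lengths=lengths` can be passed to the model.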
The dictionary object will be modified in-place during the forward pass to add newly computed\n hidden-states.\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.', FLAUBERT_START_DOCSTRING) class FlaubertPredLayer(nn.Module): def __init__(self, config): super().__init__() self.asm = config.asm self.n_words = config.n_words self.pad_index = config.pad_index dim = config.emb_dim if config.asm is False: self.proj = nn.Linear(dim, config.n_words, bias=True) else: self.proj = nn.AdaptiveLogSoftmaxWithLoss(in_features=dim, n_classes=config.n_words, cutoffs=config.asm_cutoffs, div_value=config.asm_div_value, head_bias=True) def forward(self, x, y=None): outputs = () if self.asm is False: scores = self.proj(x) outputs = (scores,) + outputs if y is not None: loss = nn.functional.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction='mean') outputs = (loss,) + outputs else: scores = self.proj.log_prob(x) outputs = (scores,) + outputs if y is not None: (_, loss) = self.proj(x, y) outputs = (loss,) + outputs return outputs class FlaubertPreTrainedModel(PreTrainedModel): config_class = FlaubertConfig load_tf_weights = None base_model_prefix = 'transformer' def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) @property def dummy_inputs(self): inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]) attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]]) if self.config.use_lang_emb and self.config.n_langs > 1: langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]]) else: langs_list = None return {'input_ids': inputs_list, 'attention_mask': attns_list, 'langs': langs_list} def _init_weights(self, module): if isinstance(module, nn.Embedding): if self.config is not None and self.config.embed_init_std is not None: nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, nn.Linear): if self.config is not None and self.config.init_std is not None: nn.init.normal_(module.weight, mean=0, std=self.config.init_std) if module.bias is not None: nn.init.constant_(module.bias, 0.0) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, FlaubertModel) and 
self.config.sinusoidal_embeddings: create_sinusoidal_embeddings(self.config.max_position_embeddings, self.config.emb_dim, out=module.position_embeddings.weight) class FlaubertModel(FlaubertPreTrainedModel): def __init__(self, config): super().__init__(config) self.is_encoder = config.is_encoder self.is_decoder = not config.is_encoder if self.is_decoder: raise NotImplementedError('Currently Flaubert can only be used as an encoder') self.causal = config.causal self.n_langs = config.n_langs self.use_lang_emb = config.use_lang_emb self.n_words = config.n_words self.eos_index = config.eos_index self.pad_index = config.pad_index self.dim = config.emb_dim self.hidden_dim = self.dim * 4 self.n_heads = config.n_heads self.n_layers = config.n_layers self.dropout = config.dropout self.attention_dropout = config.attention_dropout assert self.dim % self.n_heads == 0, 'transformer dim must be a multiple of n_heads' self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim) if config.n_langs > 1 and config.use_lang_emb: self.lang_embeddings = nn.Embedding(self.n_langs, self.dim) self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index) self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps) self.attentions = nn.ModuleList() self.layer_norm1 = nn.ModuleList() self.ffns = nn.ModuleList() self.layer_norm2 = nn.ModuleList() for _ in range(self.n_layers): self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config)) self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config)) self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) if hasattr(config, 'pruned_heads'): pruned_heads = config.pruned_heads.copy().items() config.pruned_heads = {} for (layer, heads) in pruned_heads: if self.attentions[int(layer)].n_heads == config.n_heads: self.prune_heads({int(layer): list(map(int, heads))}) self.post_init() self.layerdrop = getattr(config, 'layerdrop', 0.0) self.pre_norm = getattr(config, 'pre_norm', False) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, new_embeddings): self.embeddings = new_embeddings def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.attentions[layer].prune_heads(heads) @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, lengths: Optional[torch.LongTensor]=None, cache: Optional[Dict[str, torch.FloatTensor]]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else 
self.config.use_return_dict if input_ids is not None: (bs, slen) = input_ids.size() else: (bs, slen) = inputs_embeds.size()[:-1] device = input_ids.device if input_ids is not None else inputs_embeds.device if lengths is None: if input_ids is not None: lengths = (input_ids != self.pad_index).sum(dim=1).long() else: lengths = torch.tensor([slen] * bs, device=device) assert lengths.size(0) == bs assert lengths.max().item() <= slen (mask, attn_mask) = get_masks(slen, lengths, self.causal, padding_mask=attention_mask) if position_ids is None: if hasattr(self, 'position_ids'): position_ids = self.position_ids[:, :slen] position_ids = position_ids.expand((bs, slen)) else: position_ids = torch.arange(slen, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand((bs, slen)) else: assert position_ids.size() == (bs, slen) if langs is not None: assert langs.size() == (bs, slen) head_mask = self.get_head_mask(head_mask, self.config.n_layers) if cache is not None and input_ids is not None: _slen = slen - cache['slen'] input_ids = input_ids[:, -_slen:] position_ids = position_ids[:, -_slen:] if langs is not None: langs = langs[:, -_slen:] mask = mask[:, -_slen:] attn_mask = attn_mask[:, -_slen:] if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids) tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds) if langs is not None and self.use_lang_emb and (self.config.n_langs > 1): tensor = tensor + self.lang_embeddings(langs) if token_type_ids is not None: tensor = tensor + self.embeddings(token_type_ids) tensor = self.layer_norm_emb(tensor) tensor = nn.functional.dropout(tensor, p=self.dropout, training=self.training) tensor *= mask.unsqueeze(-1).to(tensor.dtype) hidden_states = () if output_hidden_states else None attentions = () if output_attentions else None for i in range(self.n_layers): if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue if output_hidden_states: hidden_states = hidden_states + (tensor,) if not self.pre_norm: attn_outputs = self.attentions[i](tensor, attn_mask, cache=cache, head_mask=head_mask[i], output_attentions=output_attentions) attn = attn_outputs[0] if output_attentions: attentions = attentions + (attn_outputs[1],) attn = nn.functional.dropout(attn, p=self.dropout, training=self.training) tensor = tensor + attn tensor = self.layer_norm1[i](tensor) else: tensor_normalized = self.layer_norm1[i](tensor) attn_outputs = self.attentions[i](tensor_normalized, attn_mask, cache=cache, head_mask=head_mask[i]) attn = attn_outputs[0] if output_attentions: attentions = attentions + (attn_outputs[1],) attn = nn.functional.dropout(attn, p=self.dropout, training=self.training) tensor = tensor + attn if not self.pre_norm: tensor = tensor + self.ffns[i](tensor) tensor = self.layer_norm2[i](tensor) else: tensor_normalized = self.layer_norm2[i](tensor) tensor = tensor + self.ffns[i](tensor_normalized) tensor *= mask.unsqueeze(-1).to(tensor.dtype) if output_hidden_states: hidden_states = hidden_states + (tensor,) if cache is not None: cache['slen'] += tensor.size(1) if not return_dict: return tuple((v for v in [tensor, hidden_states, attentions] if v is not None)) return BaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions) @add_start_docstrings('\n The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', FLAUBERT_START_DOCSTRING) class 
FlaubertWithLMHeadModel(FlaubertPreTrainedModel): _tied_weights_keys = ['pred_layer.proj.weight'] def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) self.pred_layer = FlaubertPredLayer(config) self.post_init() def get_output_embeddings(self): return self.pred_layer.proj def set_output_embeddings(self, new_embeddings): self.pred_layer.proj = new_embeddings def prepare_inputs_for_generation(self, input_ids, **kwargs): mask_token_id = self.config.mask_token_id lang_id = self.config.lang_id effective_batch_size = input_ids.shape[0] mask_token = torch.full((effective_batch_size, 1), mask_token_id, dtype=torch.long, device=input_ids.device) input_ids = torch.cat([input_ids, mask_token], dim=1) if lang_id is not None: langs = torch.full_like(input_ids, lang_id) else: langs = None return {'input_ids': input_ids, 'langs': langs} @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='') def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[Dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) output = transformer_outputs[0] outputs = self.pred_layer(output, labels) if not return_dict: return outputs + transformer_outputs[1:] return MaskedLMOutput(loss=outputs[0] if labels is not None else None, logits=outputs[0] if labels is None else outputs[1], hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n e.g. 
for GLUE tasks.\n ', FLAUBERT_START_DOCSTRING) class FlaubertForSequenceClassification(FlaubertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.transformer = FlaubertModel(config) self.sequence_summary = SequenceSummary(config) self.post_init() @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[Dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) output = transformer_outputs[0] logits = self.sequence_summary(output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', FLAUBERT_START_DOCSTRING) class FlaubertForTokenClassification(FlaubertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = FlaubertModel(config) self.dropout = nn.Dropout(config.dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[Dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', FLAUBERT_START_DOCSTRING) class FlaubertForQuestionAnsweringSimple(FlaubertPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[Dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is 
not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = transformer_outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + transformer_outputs[1:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n Flaubert Model with a beam-search span classification head on top for extractive question-answering tasks like\n SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', FLAUBERT_START_DOCSTRING) @dataclass class FlaubertForQuestionAnsweringOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None class FlaubertForQuestionAnswering(FlaubertPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = FlaubertModel(config) self.qa_outputs = SQuADHead(config) self.post_init() @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=FlaubertForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[Dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, is_impossible: Optional[torch.Tensor]=None, cls_index: Optional[torch.Tensor]=None, p_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, FlaubertForQuestionAnsweringOutput]: return_dict = return_dict if return_dict is not None 
else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) output = transformer_outputs[0] outputs = self.qa_outputs(output, start_positions=start_positions, end_positions=end_positions, cls_index=cls_index, is_impossible=is_impossible, p_mask=p_mask, return_dict=return_dict) if not return_dict: return outputs + transformer_outputs[1:] return FlaubertForQuestionAnsweringOutput(loss=outputs.loss, start_top_log_probs=outputs.start_top_log_probs, start_top_index=outputs.start_top_index, end_top_log_probs=outputs.end_top_log_probs, end_top_index=outputs.end_top_index, cls_logits=outputs.cls_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n ', FLAUBERT_START_DOCSTRING) class FlaubertForMultipleChoice(FlaubertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = FlaubertModel(config) self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.num_labels, 1) self.post_init() @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[Dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MultipleChoiceModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None langs = langs.view(-1, langs.size(-1)) if langs is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None if lengths is not None: logger.warning('The `lengths` parameter cannot be used with the Flaubert multiple choice models. 
Please use the attention mask instead.') lengths = None transformer_outputs = self.transformer(input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) output = transformer_outputs[0] logits = self.sequence_summary(output) logits = self.logits_proj(logits) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) # File: transformers-main/src/transformers/models/flaubert/modeling_tf_flaubert.py """""" from __future__ import annotations import itertools import random import warnings from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput from ...modeling_tf_utils import TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFSharedEmbeddings, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_flaubert import FlaubertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'flaubert/flaubert_base_cased' _CONFIG_FOR_DOC = 'FlaubertConfig' FLAUBERT_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! 
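A minimal sketch of the two input styles mentioned here, assuming TF weights are available for the `flaubert/flaubert_base_cased` checkpoint used for the code samples in this file:

from transformers import AutoTokenizer, TFFlaubertModel

tokenizer = AutoTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = TFFlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
enc = tokenizer("Le soleil brille.", return_tensors="tf")

# 1) all inputs as keyword arguments (PyTorch-style)
out_kwargs = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])
# 2) all inputs packed into a single dict in the first positional argument (Keras-style)
out_dict = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})
assert out_kwargs.last_hidden_state.shape == out_dict.last_hidden_state.shape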
If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Parameters:\n config ([`FlaubertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' FLAUBERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - `1` for tokens that are **not masked**,\n - `0` for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n langs (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are\n languages ids which can be obtained from the language names by using two conversion mappings provided in\n the configuration of the model (only provided for multilingual models). More precisely, the *language name\n to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the\n *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).\n\n See usage examples detailed in the [multilingual documentation](../multilingual).\n token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - `0` corresponds to a *sentence A* token,\n - `1` corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n lengths (`tf.Tensor` or `Numpy array` of shape `(batch_size,)`, *optional*):\n Length of each sentence that can be used to avoid performing attention on padding token indices. You can\n also use *attention_mask* for the same result (see above), kept here for compatibility Indices selected in\n `[0, ..., input_ids.size(-1)]`:\n cache (`Dict[str, tf.Tensor]`, *optional*):\n Dictionary string to `tf.FloatTensor` that contains precomputed hidden states (key and values in the\n attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential\n decoding.\n\n The dictionary object will be modified in-place during the forward pass to add newly computed\n hidden-states.\n head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - `1` indicates the head is **not masked**,\n - `0` indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
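A minimal sketch of what `return_dict` changes at call time (same illustrative checkpoint as elsewhere in this file; availability of TF weights is assumed):

from transformers import AutoTokenizer, TFFlaubertModel

tokenizer = AutoTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = TFFlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
enc = tokenizer("Bonjour tout le monde.", return_tensors="tf")

as_output = model(**enc, return_dict=True)    # TFBaseModelOutput with named attributes
as_tuple = model(**enc, return_dict=False)    # plain tuple; index 0 is the last hidden state
assert as_output.last_hidden_state.shape == as_tuple[0].shape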
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" def get_masks(slen, lengths, causal, padding_mask=None): bs = shape_list(lengths)[0] if padding_mask is not None: mask = padding_mask else: alen = tf.range(slen, dtype=lengths.dtype) mask = alen < tf.expand_dims(lengths, axis=1) if causal: attn_mask = tf.less_equal(tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))) else: attn_mask = mask tf.debugging.assert_equal(shape_list(mask), [bs, slen]) if causal: tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen]) return (mask, attn_mask) class TFFlaubertPreTrainedModel(TFPreTrainedModel): config_class = FlaubertConfig base_model_prefix = 'transformer' @property def dummy_inputs(self): inputs_list = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]], dtype=tf.int32) attns_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32) if self.config.use_lang_emb and self.config.n_langs > 1: return {'input_ids': inputs_list, 'attention_mask': attns_list, 'langs': tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32)} else: return {'input_ids': inputs_list, 'attention_mask': attns_list} @add_start_docstrings('The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.', FLAUBERT_START_DOCSTRING) class TFFlaubertModel(TFFlaubertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFFlaubertMainLayer(config, name='transformer') @unpack_inputs @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: np.ndarray | tf.Tensor | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, langs: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, lengths: np.ndarray | tf.Tensor | None=None, cache: Optional[Dict[str, tf.Tensor]]=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[Tuple, TFBaseModelOutput]: outputs = self.transformer(input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) class TFFlaubertMultiHeadAttention(keras.layers.Layer): NEW_ID = itertools.count() def __init__(self, n_heads, dim, config, **kwargs): super().__init__(**kwargs) self.layer_id = next(TFFlaubertMultiHeadAttention.NEW_ID) self.dim = dim self.n_heads = n_heads self.output_attentions = config.output_attentions assert self.dim % self.n_heads == 0 
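# Note (illustrative numbers, not from the config): the assertion above guarantees an integer
# per-head size, dim_per_head = dim // n_heads. With dim=512 and n_heads=8, `shape` in call()
# below turns (bs, seq_len, 512) into (bs, 8, seq_len, 64) so attention is computed per head,
# and `unshape` maps the weighted values back to (bs, seq_len, 512) before `out_lin`.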
self.q_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name='q_lin') self.k_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name='k_lin') self.v_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name='v_lin') self.out_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name='out_lin') self.dropout = keras.layers.Dropout(config.attention_dropout) self.pruned_heads = set() self.dim = dim def prune_heads(self, heads): raise NotImplementedError def call(self, input, mask, kv, cache, head_mask, output_attentions, training=False): (bs, qlen, dim) = shape_list(input) if kv is None: klen = qlen if cache is None else cache['slen'] + qlen else: klen = shape_list(kv)[1] dim_per_head = self.dim // self.n_heads mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen) def shape(x): return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3)) def unshape(x): return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head)) q = shape(self.q_lin(input)) if kv is None: k = shape(self.k_lin(input)) v = shape(self.v_lin(input)) elif cache is None or self.layer_id not in cache: k = v = kv k = shape(self.k_lin(k)) v = shape(self.v_lin(v)) if cache is not None: if self.layer_id in cache: if kv is None: (k_, v_) = cache[self.layer_id] k = tf.concat([k_, k], axis=2) v = tf.concat([v_, v], axis=2) else: (k, v) = cache[self.layer_id] cache[self.layer_id] = (k, v) f_dim_per_head = tf.cast(dim_per_head, dtype=q.dtype) q = tf.multiply(q, tf.math.rsqrt(f_dim_per_head)) k = tf.cast(k, dtype=q.dtype) scores = tf.matmul(q, k, transpose_b=True) mask = tf.reshape(mask, mask_reshape) mask = tf.cast(mask, dtype=scores.dtype) scores = scores - 1e+30 * (1.0 - mask) weights = stable_softmax(scores, axis=-1) weights = self.dropout(weights, training=training) if head_mask is not None: weights = weights * head_mask context = tf.matmul(weights, v) context = unshape(context) outputs = (self.out_lin(context),) if output_attentions: outputs = outputs + (weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'q_lin', None) is not None: with tf.name_scope(self.q_lin.name): self.q_lin.build([None, None, self.dim]) if getattr(self, 'k_lin', None) is not None: with tf.name_scope(self.k_lin.name): self.k_lin.build([None, None, self.dim]) if getattr(self, 'v_lin', None) is not None: with tf.name_scope(self.v_lin.name): self.v_lin.build([None, None, self.dim]) if getattr(self, 'out_lin', None) is not None: with tf.name_scope(self.out_lin.name): self.out_lin.build([None, None, self.dim]) class TFFlaubertTransformerFFN(keras.layers.Layer): def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs): super().__init__(**kwargs) self.lin1 = keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name='lin1') self.lin2 = keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name='lin2') self.act = get_tf_activation('gelu') if config.gelu_activation else get_tf_activation('relu') self.dropout = keras.layers.Dropout(config.dropout) self.in_dim = in_dim self.dim_hidden = dim_hidden def call(self, input, training=False): x = self.lin1(input) x = self.act(x) x = self.lin2(x) x = self.dropout(x, training=training) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 
'lin1', None) is not None: with tf.name_scope(self.lin1.name): self.lin1.build([None, None, self.in_dim]) if getattr(self, 'lin2', None) is not None: with tf.name_scope(self.lin2.name): self.lin2.build([None, None, self.dim_hidden]) @keras_serializable class TFFlaubertMainLayer(keras.layers.Layer): config_class = FlaubertConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.n_heads = config.n_heads self.n_langs = config.n_langs self.dim = config.emb_dim self.hidden_dim = self.dim * 4 self.n_words = config.n_words self.pad_index = config.pad_index self.causal = config.causal self.n_layers = config.n_layers self.use_lang_emb = config.use_lang_emb self.layerdrop = getattr(config, 'layerdrop', 0.0) self.pre_norm = getattr(config, 'pre_norm', False) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.max_position_embeddings = config.max_position_embeddings self.embed_init_std = config.embed_init_std self.dropout = keras.layers.Dropout(config.dropout) self.embeddings = TFSharedEmbeddings(self.n_words, self.dim, initializer_range=config.embed_init_std, name='embeddings') self.layer_norm_emb = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm_emb') self.attentions = [] self.layer_norm1 = [] self.ffns = [] self.layer_norm2 = [] for i in range(self.n_layers): self.attentions.append(TFFlaubertMultiHeadAttention(self.n_heads, self.dim, config=config, name=f'attentions_._{i}')) self.layer_norm1.append(keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f'layer_norm1_._{i}')) self.ffns.append(TFFlaubertTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name=f'ffns_._{i}')) self.layer_norm2.append(keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f'layer_norm2_._{i}')) def build(self, input_shape=None): with tf.name_scope('position_embeddings'): self.position_embeddings = self.add_weight(name='embeddings', shape=[self.max_position_embeddings, self.dim], initializer=get_initializer(self.embed_init_std)) if self.n_langs > 1 and self.use_lang_emb: with tf.name_scope('lang_embeddings'): self.lang_embeddings = self.add_weight(name='embeddings', shape=[self.n_langs, self.dim], initializer=get_initializer(self.embed_init_std)) if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'layer_norm_emb', None) is not None: with tf.name_scope(self.layer_norm_emb.name): self.layer_norm_emb.build([None, None, self.dim]) for layer in self.attentions: with tf.name_scope(layer.name): layer.build(None) for layer in self.layer_norm1: with tf.name_scope(layer.name): layer.build([None, None, self.dim]) for layer in self.ffns: with tf.name_scope(layer.name): layer.build(None) for layer in self.layer_norm2: with tf.name_scope(layer.name): layer.build([None, None, self.dim]) def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] @unpack_inputs def call(self, input_ids: np.ndarray | tf.Tensor | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, langs: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, lengths: np.ndarray | tf.Tensor | None=None, cache: Optional[Dict[str, 
tf.Tensor]]=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[Tuple, TFBaseModelOutput]: if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: (bs, slen) = shape_list(input_ids) elif inputs_embeds is not None: (bs, slen) = shape_list(inputs_embeds)[:2] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if lengths is None: if input_ids is not None: lengths = tf.reduce_sum(tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=input_ids.dtype), axis=1) else: lengths = tf.convert_to_tensor([slen] * bs) (tf.debugging.assert_equal(shape_list(lengths)[0], bs), f'Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched') (mask, attn_mask) = get_masks(slen, lengths, self.causal, padding_mask=attention_mask) if position_ids is None: position_ids = tf.expand_dims(tf.range(slen), axis=0) position_ids = tf.tile(position_ids, (bs, 1)) (tf.debugging.assert_equal(shape_list(position_ids), [bs, slen]), f'Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched') if langs is not None: (tf.debugging.assert_equal(shape_list(langs), [bs, slen]), f'Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched') if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.n_layers if cache is not None and input_ids is not None: _slen = slen - cache['slen'] input_ids = input_ids[:, -_slen:] position_ids = position_ids[:, -_slen:] if langs is not None: langs = langs[:, -_slen:] mask = mask[:, -_slen:] attn_mask = attn_mask[:, -_slen:] if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embeddings.vocab_size) inputs_embeds = self.embeddings(input_ids) tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids) if langs is not None and self.use_lang_emb: tensor = tensor + tf.gather(self.lang_embeddings, langs) if token_type_ids is not None: tensor = tensor + self.embeddings(token_type_ids) tensor = self.layer_norm_emb(tensor) tensor = self.dropout(tensor, training=training) mask = tf.cast(mask, dtype=tensor.dtype) tensor = tensor * tf.expand_dims(mask, axis=-1) hidden_states = () if output_hidden_states else None attentions = () if output_attentions else None for i in range(self.n_layers): dropout_probability = random.uniform(0, 1) if training and dropout_probability < self.layerdrop: continue if output_hidden_states: hidden_states = hidden_states + (tensor,) if not self.pre_norm: attn_outputs = self.attentions[i](tensor, attn_mask, None, cache, head_mask[i], output_attentions, training=training) attn = attn_outputs[0] if output_attentions: attentions = attentions + (attn_outputs[1],) attn = self.dropout(attn, training=training) tensor = tensor + attn tensor = self.layer_norm1[i](tensor) else: tensor_normalized = self.layer_norm1[i](tensor) attn_outputs = self.attentions[i](tensor_normalized, attn_mask, None, cache, head_mask[i], output_attentions, training=training) attn = attn_outputs[0] if output_attentions: attentions = attentions + (attn_outputs[1],) attn = self.dropout(attn, training=training) tensor = tensor + attn if not self.pre_norm: tensor = tensor + self.ffns[i](tensor) tensor = self.layer_norm2[i](tensor) else: tensor_normalized = 
self.layer_norm2[i](tensor) tensor = tensor + self.ffns[i](tensor_normalized) tensor = tensor * tf.expand_dims(mask, axis=-1) if output_hidden_states: hidden_states = hidden_states + (tensor,) if cache is not None: cache['slen'] += shape_list(tensor)[1] if not return_dict: return tuple((v for v in [tensor, hidden_states, attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions) class TFFlaubertPredLayer(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.asm = config.asm self.n_words = config.n_words self.pad_index = config.pad_index if config.asm is False: self.input_embeddings = input_embeddings else: raise NotImplementedError def build(self, input_shape): self.bias = self.add_weight(shape=(self.n_words,), initializer='zeros', trainable=True, name='bias') super().build(input_shape) def get_output_embeddings(self): return self.input_embeddings def set_output_embeddings(self, value): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self): return {'bias': self.bias} def set_bias(self, value): self.bias = value['bias'] self.vocab_size = shape_list(value['bias'])[0] def call(self, hidden_states): hidden_states = self.input_embeddings(hidden_states, mode='linear') hidden_states = hidden_states + self.bias return hidden_states @dataclass class TFFlaubertWithLMHeadModelOutput(ModelOutput): logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None @add_start_docstrings('\n The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', FLAUBERT_START_DOCSTRING) class TFFlaubertWithLMHeadModel(TFFlaubertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFFlaubertMainLayer(config, name='transformer') self.pred_layer = TFFlaubertPredLayer(config, self.transformer.embeddings, name='pred_layer_._proj') self.supports_xla_generation = False def get_lm_head(self): return self.pred_layer def get_prefix_bias_name(self): warnings.warn('The method get_prefix_bias_name is deprecated.
Please use `get_bias` instead.', FutureWarning) return self.name + '/' + self.pred_layer.name def prepare_inputs_for_generation(self, inputs, **kwargs): mask_token_id = self.config.mask_token_id lang_id = self.config.lang_id effective_batch_size = inputs.shape[0] mask_token = tf.fill((effective_batch_size, 1), 1) * mask_token_id inputs = tf.concat([inputs, mask_token], axis=1) if lang_id is not None: langs = tf.ones_like(inputs) * lang_id else: langs = None return {'input_ids': inputs, 'langs': langs} @unpack_inputs @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFFlaubertWithLMHeadModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: np.ndarray | tf.Tensor | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, langs: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, lengths: np.ndarray | tf.Tensor | None=None, cache: Optional[Dict[str, tf.Tensor]]=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[Tuple, TFFlaubertWithLMHeadModelOutput]: transformer_outputs = self.transformer(input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) output = transformer_outputs[0] outputs = self.pred_layer(output) if not return_dict: return (outputs,) + transformer_outputs[1:] return TFFlaubertWithLMHeadModelOutput(logits=outputs, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, 'pred_layer', None) is not None: with tf.name_scope(self.pred_layer.name): self.pred_layer.build(None) @add_start_docstrings('\n Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n e.g. 
for GLUE tasks.\n ', FLAUBERT_START_DOCSTRING) class TFFlaubertForSequenceClassification(TFFlaubertPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFFlaubertMainLayer(config, name='transformer') self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name='sequence_summary') @unpack_inputs @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, langs: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, lengths: np.ndarray | tf.Tensor | None=None, cache: Optional[Dict[str, tf.Tensor]]=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: bool=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: transformer_outputs = self.transformer(input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) output = transformer_outputs[0] logits = self.sequence_summary(output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, 'sequence_summary', None) is not None: with tf.name_scope(self.sequence_summary.name): self.sequence_summary.build(None) @add_start_docstrings('\n Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', FLAUBERT_START_DOCSTRING) class TFFlaubertForQuestionAnsweringSimple(TFFlaubertPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFFlaubertMainLayer(config, name='transformer') self.qa_outputs = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.init_std), name='qa_outputs') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, langs: np.ndarray | 
tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, lengths: np.ndarray | tf.Tensor | None=None, cache: Optional[Dict[str, tf.Tensor]]=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, start_positions: np.ndarray | tf.Tensor | None=None, end_positions: np.ndarray | tf.Tensor | None=None, training: bool=False) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: transformer_outputs = self.transformer(input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = transformer_outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {'start_position': start_positions} labels['end_position'] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, 'qa_outputs', None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', FLAUBERT_START_DOCSTRING) class TFFlaubertForTokenClassification(TFFlaubertPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFFlaubertMainLayer(config, name='transformer') self.dropout = keras.layers.Dropout(config.dropout) self.classifier = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.init_std), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, langs: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, lengths: np.ndarray | tf.Tensor | None=None, cache: Optional[Dict[str, tf.Tensor]]=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: bool=False) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: transformer_outputs = self.transformer(input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = transformer_outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', FLAUBERT_START_DOCSTRING) class TFFlaubertForMultipleChoice(TFFlaubertPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFFlaubertMainLayer(config, name='transformer') self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name='sequence_summary') self.logits_proj = keras.layers.Dense(1, kernel_initializer=get_initializer(config.initializer_range), name='logits_proj') self.config = config @property def dummy_inputs(self): if self.config.use_lang_emb and self.config.n_langs > 1: return {'input_ids': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32), 'langs': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32)} else: return {'input_ids': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32)} @unpack_inputs @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, langs: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, lengths: np.ndarray | tf.Tensor | None=None, cache: Optional[Dict[str, tf.Tensor]]=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: bool=False) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_langs = tf.reshape(langs, (-1, seq_length)) if langs is not None else None flat_inputs_embeds = tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None if lengths is not None: logger.warning('The `lengths` parameter cannot be used with the Flaubert multiple choice models. 
Please use the attention mask instead.') lengths = None transformer_outputs = self.transformer(flat_input_ids, flat_attention_mask, flat_langs, flat_token_type_ids, flat_position_ids, lengths, cache, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training) output = transformer_outputs[0] logits = self.sequence_summary(output) logits = self.logits_proj(logits) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return TFMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, 'sequence_summary', None) is not None: with tf.name_scope(self.sequence_summary.name): self.sequence_summary.build(None) if getattr(self, 'logits_proj', None) is not None: with tf.name_scope(self.logits_proj.name): self.logits_proj.build([None, None, self.config.num_labels]) # File: transformers-main/src/transformers/models/flaubert/tokenization_flaubert.py """""" import json import os import re import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} def convert_to_unicode(text): def ensure_text(s, encoding='utf-8', errors='strict'): if isinstance(s, bytes): return s.decode(encoding, errors) elif isinstance(s, str): return s else: raise TypeError(f"not expecting type '{type(s)}'") return ensure_text(text, encoding='utf-8', errors='ignore') def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def replace_unicode_punct(text): text = text.replace(',', ',') text = re.sub('。\\s*', '. ', text) text = text.replace('、', ',') text = text.replace('”', '"') text = text.replace('“', '"') text = text.replace('∶', ':') text = text.replace(':', ':') text = text.replace('?', '?') text = text.replace('《', '"') text = text.replace('》', '"') text = text.replace(')', ')') text = text.replace('!', '!') text = text.replace('(', '(') text = text.replace(';', ';') text = text.replace('1', '1') text = text.replace('」', '"') text = text.replace('「', '"') text = text.replace('0', '0') text = text.replace('3', '3') text = text.replace('2', '2') text = text.replace('5', '5') text = text.replace('6', '6') text = text.replace('9', '9') text = text.replace('7', '7') text = text.replace('8', '8') text = text.replace('4', '4') text = re.sub('.\\s*', '. 
', text) text = text.replace('～', '~') text = text.replace('’', "'") text = text.replace('…', '...') text = text.replace('━', '-') text = text.replace('〈', '<') text = text.replace('〉', '>') text = text.replace('【', '[') text = text.replace('】', ']') text = text.replace('％', '%') return text def remove_non_printing_char(text): output = [] for char in text: cat = unicodedata.category(char) if cat.startswith('C'): continue output.append(char) return ''.join(output) class FlaubertTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, merges_file, do_lowercase=False, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', cls_token='</s>', mask_token='<special1>', additional_special_tokens=['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>'], lang2id=None, id2lang=None, **kwargs): do_lowercase_and_remove_accent = kwargs.pop('do_lowercase_and_remove_accent', None) if do_lowercase_and_remove_accent is not None: logger.warning("`do_lowercase_and_remove_accent` is passed as a keyword argument, but this won't do anything. `FlaubertTokenizer` will always set it to `False`.") self.do_lowercase_and_remove_accent = False self.do_lowercase = do_lowercase try: import sacremoses except ImportError: raise ImportError('You need to install sacremoses to use FlaubertTokenizer. See https://pypi.org/project/sacremoses/ for installation.') self.sm = sacremoses self.cache_moses_punct_normalizer = {} self.cache_moses_tokenizer = {} self.lang_with_custom_tokenizer = {'zh', 'th', 'ja'} self.lang2id = lang2id self.id2lang = id2lang if lang2id is not None and id2lang is not None: assert len(lang2id) == len(id2lang) self.ja_word_tokenizer = None self.zh_word_tokenizer = None with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for (k, v) in self.encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: merges = merges_handle.read().split('\n')[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__(do_lowercase=do_lowercase, unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, lang2id=lang2id, id2lang=id2lang, **kwargs) @property def do_lower_case(self): return self.do_lowercase_and_remove_accent def moses_punct_norm(self, text, lang): if lang not in self.cache_moses_punct_normalizer: punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang) self.cache_moses_punct_normalizer[lang] = punct_normalizer else: punct_normalizer = self.cache_moses_punct_normalizer[lang] return punct_normalizer.normalize(text) def moses_tokenize(self, text, lang): if lang not in self.cache_moses_tokenizer: moses_tokenizer = self.sm.MosesTokenizer(lang=lang) self.cache_moses_tokenizer[lang] = moses_tokenizer else: moses_tokenizer = self.cache_moses_tokenizer[lang] return moses_tokenizer.tokenize(text, return_str=False, escape=False) def moses_pipeline(self, text, lang): text = replace_unicode_punct(text) text = self.moses_punct_norm(text, lang) text = remove_non_printing_char(text) return text def ja_tokenize(self, text): if self.ja_word_tokenizer is None: try: import Mykytea self.ja_word_tokenizer = Mykytea.Mykytea(f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin") except (AttributeError, ImportError): logger.error("Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper (https://github.com/chezou/Mykytea-python) with the following steps")
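# Illustrative usage sketch, not part of the original module: it assumes the public
# "flaubert/flaubert_base_cased" checkpoint is reachable and that `sacremoses` is installed.
from transformers import FlaubertTokenizer

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
enc = tokenizer("Bonjour le monde !")
# build_inputs_with_special_tokens() wraps the ids as <s> ... </s>; word-final BPE pieces
# carry the "</w>" marker that convert_tokens_to_string() later turns back into spaces.
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
print(tokenizer.decode(enc["input_ids"], skip_special_tokens=True))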
logger.error('1. git clone git@github.com:neubig/kytea.git && cd kytea') logger.error('2. autoreconf -i') logger.error('3. ./configure --prefix=$HOME/local') logger.error('4. make && make install') logger.error('5. pip install kytea') raise return list(self.ja_word_tokenizer.getWS(text)) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + '</w>',) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + '</w>' while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) if word == '\n </w>': word = '\n</w>' self.cache[token] = word return word def preprocess_text(self, text): text = text.replace('``', '"').replace("''", '"') text = convert_to_unicode(text) text = unicodedata.normalize('NFC', text) if self.do_lowercase: text = text.lower() return text def _tokenize(self, text, bypass_tokenizer=False): lang = 'fr' if lang and self.lang2id and (lang not in self.lang2id): logger.error('Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model.') if bypass_tokenizer: text = text.split() else: text = self.preprocess_text(text) text = self.moses_pipeline(text, lang=lang) text = self.moses_tokenize(text, lang=lang) split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(' '))) return split_tokens def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): out_string = ''.join(tokens).replace('</w>', ' ').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: bos = [self.bos_token_id] sep = [self.sep_token_id] if token_ids_1 is None: return bos + token_ids_0 + sep return bos + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not
os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) def __getstate__(self): state = self.__dict__.copy() state['sm'] = None return state def __setstate__(self, d): self.__dict__ = d try: import sacremoses except ImportError: raise ImportError('You need to install sacremoses to use XLMTokenizer. See https://pypi.org/project/sacremoses/ for installation.') self.sm = sacremoses # File: transformers-main/src/transformers/models/flava/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_flava': ['FlavaConfig', 'FlavaImageCodebookConfig', 'FlavaImageConfig', 'FlavaMultimodalConfig', 'FlavaTextConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['feature_extraction_flava'] = ['FlavaFeatureExtractor'] _import_structure['image_processing_flava'] = ['FlavaImageProcessor'] _import_structure['processing_flava'] = ['FlavaProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flava'] = ['FlavaForPreTraining', 'FlavaImageCodebook', 'FlavaImageModel', 'FlavaModel', 'FlavaMultimodalModel', 'FlavaPreTrainedModel', 'FlavaTextModel'] if TYPE_CHECKING: from .configuration_flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_flava import FlavaFeatureExtractor from .image_processing_flava import FlavaImageProcessor from .processing_flava import FlavaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flava import FlavaForPreTraining, FlavaImageCodebook, FlavaImageModel, FlavaModel, FlavaMultimodalModel, FlavaPreTrainedModel, FlavaTextModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/flava/configuration_flava.py """""" import os from typing import Any, Dict, Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class FlavaImageConfig(PretrainedConfig): model_type = 'flava_image_model' def __init__(self, hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: 
int='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, layer_norm_eps: float=1e-12, image_size: int=224, patch_size: int=16, num_channels: int=3, qkv_bias: bool=True, mask_token: bool=True, vocab_size: int=8192, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.mask_token = mask_token self.vocab_size = vocab_size @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'flava': config_dict = config_dict['image_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class FlavaTextConfig(PretrainedConfig): model_type = 'flava_text_model' def __init__(self, vocab_size: int=30522, type_vocab_size: int=2, max_position_embeddings: int=512, position_embedding_type: str='absolute', hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, layer_norm_eps: float=1e-12, pad_token_id: int=0, qkv_bias: bool=True, **kwargs): super().__init__(**kwargs) self.vocab_size = vocab_size self.type_vocab_size = type_vocab_size self.max_position_embeddings = max_position_embeddings self.position_embedding_type = position_embedding_type self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.pad_token_id = pad_token_id @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'flava': config_dict = config_dict['text_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class FlavaMultimodalConfig(PretrainedConfig): model_type = 'flava_multimodal_model' def __init__(self, hidden_size: int=768, num_hidden_layers: int=6, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: int='gelu', hidden_dropout_prob: int=0.0, attention_probs_dropout_prob: int=0.0, initializer_range: float=0.02, layer_norm_eps: float=1e-12, qkv_bias: bool=True, use_cls_token: bool=True, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.use_cls_token = use_cls_token @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'flava': config_dict = config_dict['multimodal_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class FlavaImageCodebookConfig(PretrainedConfig): model_type = 'flava_image_codebook' '' def __init__(self, num_groups: int=4, input_channels: int=3, num_blocks_per_group: int=2, hidden_size: int=256, vocab_size: int=8192, freeze: int=True, initializer_range: float=0.02, **kwargs): super().__init__(**kwargs) self.num_groups = num_groups self.input_channels = input_channels self.num_blocks_per_group = num_blocks_per_group self.hidden_size = hidden_size self.vocab_size = vocab_size self.freeze = freeze self.initializer_range = initializer_range @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'flava': config_dict = config_dict['image_codebook_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class FlavaConfig(PretrainedConfig): model_type = 'flava' def __init__(self, image_config: Dict[str, Any]=None, text_config: Dict[str, Any]=None, multimodal_config: Dict[str, Any]=None, image_codebook_config: Dict[str, Any]=None, hidden_size: int=768, layer_norm_eps: float=1e-12, projection_dim: int=768, init_codebook: bool=True, logit_scale_init_value: float=2.6592, initializer_range: float=0.02, ce_ignore_index: int=-100, mim_weight: float=1.0, mlm_weight: float=1.0, global_contrastive_weight: float=1.0, itm_weight: float=1.0, mmm_image_weight: float=1.0, mmm_text_weight: float=1.0, global_backprop_contrastive: bool=True, skip_unmasked_multimodal_encoder: bool=True, return_loss: bool=True, **kwargs): text_config_dict = kwargs.pop('text_config_dict', None) image_config_dict = kwargs.pop('image_config_dict', None) multimodal_config_dict = kwargs.pop('multimodal_config_dict', None) image_codebook_config_dict = kwargs.pop('image_codebook_config_dict', None) super().__init__(**kwargs) if text_config_dict is not None: if text_config is None: text_config = {} _text_config_dict = FlavaTextConfig(**text_config_dict).to_dict() for (key, value) in _text_config_dict.items(): if key in text_config and value != text_config[key] and (key not in ['transformers_version']): if key in text_config_dict: message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["{key}"]` will be used instead.' else: message = f'`text_config_dict` is provided which will be used to initialize `FlavaTextConfig`. The value `text_config["{key}"]` will be overridden.' logger.info(message) text_config.update(_text_config_dict) if image_config_dict is not None: if image_config is None: image_config = {} _image_config_dict = FlavaImageConfig(**image_config_dict).to_dict() if 'id2label' in _image_config_dict: _image_config_dict['id2label'] = {str(key): value for (key, value) in _image_config_dict['id2label'].items()} for (key, value) in _image_config_dict.items(): if key in image_config and value != image_config[key] and (key not in ['transformers_version']): if key in image_config_dict: message = f'`{key}` is found in both `image_config_dict` and `image_config` but with different values. The value `image_config_dict["{key}"]` will be used instead.' else: message = f'`image_config_dict` is provided which will be used to initialize `FlavaImageConfig`. The value `image_config["{key}"]` will be overridden.' logger.info(message) image_config.update(_image_config_dict) if multimodal_config_dict is not None: if multimodal_config is None: multimodal_config = {} _multimodal_config_dict = FlavaMultimodalConfig(**multimodal_config_dict).to_dict() for (key, value) in _multimodal_config_dict.items(): if key in multimodal_config and value != multimodal_config[key] and (key not in ['transformers_version']): if key in multimodal_config_dict: message = f'`{key}` is found in both `multimodal_config_dict` and `multimodal_config` but with different values. The value `multimodal_config_dict["{key}"]` will be used instead.' else: message = f'`multimodal_config_dict` is provided which will be used to initialize `FlavaMultimodalConfig`. The value `multimodal_config["{key}"]` will be overridden.' 
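# Illustrative sketch of how the nested FLAVA configs compose, not part of the original
# module; the values printed are simply the defaults defined above.
from transformers import (FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig,
                          FlavaMultimodalConfig, FlavaTextConfig)

config = FlavaConfig()                 # every sub-config falls back to its defaults
print(config.text_config.vocab_size)   # 30522
print(config.image_config.patch_size)  # 16

# from_configs(), defined at the end of this class, builds the composite config from
# explicit sub-config objects:
custom = FlavaConfig.from_configs(
    image_config=FlavaImageConfig(patch_size=32),
    text_config=FlavaTextConfig(),
    multimodal_config=FlavaMultimodalConfig(),
    image_codebook_config=FlavaImageCodebookConfig(),
)
print(custom.image_config.patch_size)  # 32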
logger.info(message) multimodal_config.update(_multimodal_config_dict) if image_codebook_config_dict is not None: if image_codebook_config is None: image_codebook_config = {} _image_codebook_config_dict = FlavaImageCodebookConfig(**image_codebook_config_dict).to_dict() for (key, value) in _image_codebook_config_dict.items(): if key in image_codebook_config and value != image_codebook_config[key] and (key not in ['transformers_version']): if key in image_codebook_config_dict: message = f'`{key}` is found in both `image_codebook_config_dict` and `image_codebook_config` but with different values. The value `image_codebook_config_dict["{key}"]` will be used instead.' else: message = f'`image_codebook_config_dict` is provided which will be used to initialize `FlavaImageCodebookConfig`. The value `image_codebook_config["{key}"]` will be overridden.' logger.info(message) image_codebook_config.update(_image_codebook_config_dict) if image_config is None: image_config = {} logger.info('`image_config` is `None`. initializing the `FlavaImageConfig` with default values.') if text_config is None: text_config = {} logger.info('`text_config` is `None`. Initializing the `FlavaTextConfig` with default values.') if multimodal_config is None: multimodal_config = {} logger.info('`multimodal_config` is `None`. initializing the `FlavaMultimodalConfig` with default values.') if image_codebook_config is None: image_codebook_config = {} logger.info('`image_codebook_config` is `None`. initializing the `FlavaImageCodebookConfig` with default values.') self.image_config = FlavaImageConfig(**image_config) self.text_config = FlavaTextConfig(**text_config) self.multimodal_config = FlavaMultimodalConfig(**multimodal_config) self.image_codebook_config = FlavaImageCodebookConfig(**image_codebook_config) self.projection_dim = projection_dim self.init_codebook = init_codebook self.hidden_size = hidden_size self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = 1.0 self.ce_ignore_index = ce_ignore_index self.mim_weight = mim_weight self.mlm_weight = mlm_weight self.global_contrastive_weight = global_contrastive_weight self.itm_weight = itm_weight self.mmm_image_weight = mmm_image_weight self.mmm_text_weight = mmm_text_weight self.global_backprop_contrastive = global_backprop_contrastive self.skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder self.return_loss = return_loss @classmethod def from_configs(cls, image_config: FlavaImageConfig, text_config: FlavaTextConfig, multimodal_config: FlavaMultimodalConfig, image_codebook_config: FlavaImageCodebookConfig, **kwargs): return cls(image_config=image_config.to_dict(), text_config=text_config.to_dict(), multimodal_config=multimodal_config.to_dict(), image_codebook_config=image_codebook_config.to_dict(), **kwargs) # File: transformers-main/src/transformers/models/flava/convert_dalle_to_flava_codebook.py import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def rreplace(s, old, new, occurrence): li = s.rsplit(old, occurrence) return new.join(li) def count_parameters(state_dict): return sum((param.float().sum() if 'encoder.embeddings' not in key else 0 for (key, param) in state_dict.items())) def upgrade_state_dict(state_dict): upgrade = {} group_keys = ['group_1', 'group_2', 'group_3', 'group_4'] for (key, value) in state_dict.items(): for group_key in group_keys: if group_key in key: key = 
key.replace(f'{group_key}.', f'{group_key}.group.') if 'res_path' in key: key = key.replace('res_path.', 'res_path.path.') if key.endswith('.w'): key = rreplace(key, '.w', '.weight', 1) if key.endswith('.b'): key = rreplace(key, '.b', '.bias', 1) upgrade[key] = value.float() return upgrade @torch.no_grad() def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True): from dall_e import Encoder encoder = Encoder() if os.path.exists(checkpoint_path): ckpt = torch.load(checkpoint_path) else: ckpt = torch.hub.load_state_dict_from_url(checkpoint_path) if isinstance(ckpt, Encoder): ckpt = ckpt.state_dict() encoder.load_state_dict(ckpt) if config_path is not None: config = FlavaImageCodebookConfig.from_pretrained(config_path) else: config = FlavaImageCodebookConfig() hf_model = FlavaImageCodebook(config).eval() state_dict = encoder.state_dict() hf_state_dict = upgrade_state_dict(state_dict) hf_model.load_state_dict(hf_state_dict) hf_state_dict = hf_model.state_dict() hf_count = count_parameters(hf_state_dict) state_dict_count = count_parameters(state_dict) assert torch.allclose(hf_count, state_dict_count, atol=0.001) if save_checkpoint: hf_model.save_pretrained(pytorch_dump_folder_path) else: return hf_state_dict if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') args = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) # File: transformers-main/src/transformers/models/flava/convert_flava_original_pytorch_to_hf.py import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def count_parameters(state_dict): return sum((param.float().sum() if 'encoder.embeddings' not in key else 0 for (key, param) in state_dict.items())) def upgrade_state_dict(state_dict, codebook_state_dict): upgrade = {} for (key, value) in state_dict.items(): if 'text_encoder.embeddings' in key or 'image_encoder.embeddings' in key: continue key = key.replace('heads.cmd.mim_head.cls.predictions', 'mmm_image_head') key = key.replace('heads.cmd.mlm_head.cls.predictions', 'mmm_text_head') key = key.replace('heads.cmd.itm_head.cls', 'itm_head') key = key.replace('heads.cmd.itm_head.pooler', 'itm_head.pooler') key = key.replace('heads.cmd.clip_head.logit_scale', 'flava.logit_scale') key = key.replace('heads.fairseq_mlm.cls.predictions', 'mlm_head') key = key.replace('heads.imagenet.mim_head.cls.predictions', 'mim_head') key = key.replace('mm_text_projection', 'flava.text_to_mm_projection') key = key.replace('mm_image_projection', 'flava.image_to_mm_projection') key = key.replace('image_encoder.module', 'flava.image_model') key = key.replace('text_encoder.module', 'flava.text_model') key = key.replace('mm_encoder.module.encoder.cls_token', 'flava.multimodal_model.cls_token') key = key.replace('mm_encoder.module', 'flava.multimodal_model') key = key.replace('text_projection', 'flava.text_projection') key = key.replace('image_projection', 'flava.image_projection') upgrade[key] = value.float() for (key, value) in codebook_state_dict.items(): 
upgrade[f'image_codebook.{key}'] = value return upgrade @torch.no_grad() def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None): if config_path is not None: config = FlavaConfig.from_pretrained(config_path) else: config = FlavaConfig() hf_model = FlavaForPreTraining(config).eval() codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False) if os.path.exists(checkpoint_path): state_dict = torch.load(checkpoint_path, map_location='cpu') else: state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location='cpu') hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict) hf_model.load_state_dict(hf_state_dict) hf_state_dict = hf_model.state_dict() hf_count = count_parameters(hf_state_dict) state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict) assert torch.allclose(hf_count, state_dict_count, atol=0.001) hf_model.save_pretrained(pytorch_dump_folder_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') args = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path) # File: transformers-main/src/transformers/models/flava/feature_extraction_flava.py """""" import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor logger = logging.get_logger(__name__) class FlavaFeatureExtractor(FlavaImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. 
Please use FlavaImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs) # File: transformers-main/src/transformers/models/flava/image_processing_flava.py """""" import math import random from functools import lru_cache from typing import Any, Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) FLAVA_IMAGE_MEAN = OPENAI_CLIP_MEAN FLAVA_IMAGE_STD = OPENAI_CLIP_STD FLAVA_CODEBOOK_MEAN = [0.0, 0.0, 0.0] FLAVA_CODEBOOK_STD = [1.0, 1.0, 1.0] LOGIT_LAPLACE_EPS: float = 0.1 class FlavaMaskingGenerator: def __init__(self, input_size: Union[int, Tuple[int, int]]=14, total_mask_patches: int=75, mask_group_max_patches: Optional[int]=None, mask_group_min_patches: int=16, mask_group_min_aspect_ratio: Optional[float]=0.3, mask_group_max_aspect_ratio: float=None): if not isinstance(input_size, tuple): input_size = (input_size,) * 2 (self.height, self.width) = input_size self.num_patches = self.height * self.width self.total_mask_patches = total_mask_patches self.mask_group_min_patches = mask_group_min_patches self.mask_group_max_patches = total_mask_patches if mask_group_max_patches is None else mask_group_max_patches mask_group_max_aspect_ratio = mask_group_max_aspect_ratio or 1 / mask_group_min_aspect_ratio self.log_aspect_ratio = (math.log(mask_group_min_aspect_ratio), math.log(mask_group_max_aspect_ratio)) def __repr__(self): repr_str = 'MaskingGenerator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)' % (self.height, self.width, self.mask_group_min_patches, self.mask_group_max_patches, self.total_mask_patches, self.log_aspect_ratio[0], self.log_aspect_ratio[1]) return repr_str def get_shape(self): return (self.height, self.width) def _mask(self, mask, max_mask_patches): delta = 0 for _attempt in range(10): target_area = random.uniform(self.mask_group_min_patches, max_mask_patches) aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) height = int(round(math.sqrt(target_area * aspect_ratio))) width = int(round(math.sqrt(target_area / aspect_ratio))) if width < self.width and height < self.height: top = random.randint(0, self.height - height) left = random.randint(0, self.width - width) num_masked = mask[top:top + height, left:left + width].sum() if 0 < height * width - num_masked <= max_mask_patches: for i in range(top, top + height): for j in range(left, left + width): if mask[i, j] == 0: mask[i, j] = 1 delta += 1 if delta > 0: break return delta def __call__(self): mask = np.zeros(shape=self.get_shape(), dtype=int) mask_count = 0 while mask_count < self.total_mask_patches: max_mask_patches = self.total_mask_patches - mask_count max_mask_patches = min(max_mask_patches, self.mask_group_max_patches) delta = self._mask(mask, max_mask_patches) if delta == 0: break else: mask_count += delta return mask class FlavaImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, 
do_center_crop: bool=True, crop_size: Dict[str, int]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, Iterable[float]]]=None, image_std: Optional[Union[float, Iterable[float]]]=None, return_image_mask: bool=False, input_size_patches: int=14, total_mask_patches: int=75, mask_group_min_patches: int=16, mask_group_max_patches: Optional[int]=None, mask_group_min_aspect_ratio: float=0.3, mask_group_max_aspect_ratio: Optional[float]=None, return_codebook_pixels: bool=False, codebook_do_resize: bool=True, codebook_size: bool=None, codebook_resample: int=PILImageResampling.LANCZOS, codebook_do_center_crop: bool=True, codebook_crop_size: int=None, codebook_do_rescale: bool=True, codebook_rescale_factor: Union[int, float]=1 / 255, codebook_do_map_pixels: bool=True, codebook_do_normalize: bool=True, codebook_image_mean: Optional[Union[float, Iterable[float]]]=None, codebook_image_std: Optional[Union[float, Iterable[float]]]=None, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 224, 'width': 224} size = get_size_dict(size) crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224} crop_size = get_size_dict(crop_size, param_name='crop_size') codebook_size = codebook_size if codebook_size is not None else {'height': 112, 'width': 112} codebook_size = get_size_dict(codebook_size, param_name='codebook_size') codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {'height': 112, 'width': 112} codebook_crop_size = get_size_dict(codebook_crop_size, param_name='codebook_crop_size') self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else FLAVA_IMAGE_MEAN self.image_std = image_std if image_std is not None else FLAVA_IMAGE_STD self.return_image_mask = return_image_mask self.input_size_patches = input_size_patches self.total_mask_patches = total_mask_patches self.mask_group_min_patches = mask_group_min_patches self.mask_group_max_patches = mask_group_max_patches self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio self.return_codebook_pixels = return_codebook_pixels self.codebook_do_resize = codebook_do_resize self.codebook_size = codebook_size self.codebook_resample = codebook_resample self.codebook_do_center_crop = codebook_do_center_crop self.codebook_crop_size = codebook_crop_size self.codebook_do_rescale = codebook_do_rescale self.codebook_rescale_factor = codebook_rescale_factor self.codebook_do_map_pixels = codebook_do_map_pixels self.codebook_do_normalize = codebook_do_normalize self.codebook_image_mean = codebook_image_mean self.codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else FLAVA_CODEBOOK_MEAN self.codebook_image_std = codebook_image_std if codebook_image_std is not None else FLAVA_CODEBOOK_STD @classmethod def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): image_processor_dict = image_processor_dict.copy() if 'codebook_size' in kwargs: image_processor_dict['codebook_size'] = kwargs.pop('codebook_size') if 'codebook_crop_size' in kwargs: image_processor_dict['codebook_crop_size'] = kwargs.pop('codebook_crop_size') return super().from_dict(image_processor_dict, **kwargs) 
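# Usage sketch (illustrative, not part of the original module): how the image processor defined
# above is typically driven. Assumes Pillow and numpy are installed; the random RGB image is a
# stand-in for real data, and every keyword argument shown mirrors the constructor defaults
# documented above rather than any additional API.
import numpy as np
from PIL import Image
from transformers import FlavaImageProcessor

image_processor = FlavaImageProcessor()  # 224x224 resize/center-crop, CLIP mean/std by default
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
encoding = image_processor(
    image,
    return_image_mask=True,       # adds `bool_masked_pos` produced by FlavaMaskingGenerator
    return_codebook_pixels=True,  # adds 112x112 `codebook_pixel_values` for the image codebook
    return_tensors="pt",
)
# encoding["pixel_values"]          -> shape (1, 3, 224, 224)
# encoding["codebook_pixel_values"] -> shape (1, 3, 112, 112)
# encoding["bool_masked_pos"]       -> shape (1, 14, 14), a patch mask with up to 75 masked patches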
@lru_cache() def masking_generator(self, input_size_patches, total_mask_patches, mask_group_min_patches, mask_group_max_patches, mask_group_min_aspect_ratio, mask_group_max_aspect_ratio) -> FlavaMaskingGenerator: return FlavaMaskingGenerator(input_size=input_size_patches, total_mask_patches=total_mask_patches, mask_group_min_patches=mask_group_min_patches, mask_group_max_patches=mask_group_max_patches, mask_group_min_aspect_ratio=mask_group_min_aspect_ratio, mask_group_max_aspect_ratio=mask_group_max_aspect_ratio) def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size) if 'height' not in size or 'width' not in size: raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}') output_size = (size['height'], size['width']) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def map_pixels(self, image: np.ndarray) -> np.ndarray: return (1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS def _preprocess_image(self, image: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: Dict[str, int]=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_map_pixels: bool=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[ChannelDimension]=None) -> np.ndarray: validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) image = to_numpy_array(image) if is_scaled_image(image) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) if do_map_pixels: image = self.map_pixels(image) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_center_crop: Optional[bool]=None, crop_size: Optional[Dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_image_mask: Optional[bool]=None, input_size_patches: Optional[int]=None, total_mask_patches: Optional[int]=None, mask_group_min_patches: Optional[int]=None, mask_group_max_patches: Optional[int]=None, mask_group_min_aspect_ratio: Optional[float]=None, mask_group_max_aspect_ratio: Optional[float]=None, return_codebook_pixels: Optional[bool]=None, codebook_do_resize: Optional[bool]=None, codebook_size: Optional[Dict[str, int]]=None, codebook_resample: Optional[int]=None, codebook_do_center_crop: Optional[bool]=None, codebook_crop_size: Optional[Dict[str, int]]=None, codebook_do_rescale: Optional[bool]=None, codebook_rescale_factor: Optional[float]=None, codebook_do_map_pixels: Optional[bool]=None, codebook_do_normalize: Optional[bool]=None, codebook_image_mean: Optional[Iterable[float]]=None, codebook_image_std: Optional[Iterable[float]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name='crop_size') do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std return_image_mask = return_image_mask if return_image_mask is not None else self.return_image_mask input_size_patches = input_size_patches if input_size_patches is not None else self.input_size_patches total_mask_patches = total_mask_patches if total_mask_patches is not None else self.total_mask_patches mask_group_min_patches = mask_group_min_patches if mask_group_min_patches is not None else 
self.mask_group_min_patches mask_group_max_patches = mask_group_max_patches if mask_group_max_patches is not None else self.mask_group_max_patches mask_group_min_aspect_ratio = mask_group_min_aspect_ratio if mask_group_min_aspect_ratio is not None else self.mask_group_min_aspect_ratio mask_group_max_aspect_ratio = mask_group_max_aspect_ratio if mask_group_max_aspect_ratio is not None else self.mask_group_max_aspect_ratio return_codebook_pixels = return_codebook_pixels if return_codebook_pixels is not None else self.return_codebook_pixels codebook_do_resize = codebook_do_resize if codebook_do_resize is not None else self.codebook_do_resize codebook_size = codebook_size if codebook_size is not None else self.codebook_size codebook_size = get_size_dict(codebook_size, param_name='codebook_size') codebook_resample = codebook_resample if codebook_resample is not None else self.codebook_resample codebook_do_rescale = codebook_do_rescale if codebook_do_rescale is not None else self.codebook_do_rescale codebook_rescale_factor = codebook_rescale_factor if codebook_rescale_factor is not None else self.codebook_rescale_factor codebook_do_center_crop = codebook_do_center_crop if codebook_do_center_crop is not None else self.codebook_do_center_crop codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else self.codebook_crop_size codebook_crop_size = get_size_dict(codebook_crop_size, param_name='codebook_crop_size') codebook_do_map_pixels = codebook_do_map_pixels if codebook_do_map_pixels is not None else self.codebook_do_map_pixels codebook_do_normalize = codebook_do_normalize if codebook_do_normalize is not None else self.codebook_do_normalize codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else self.codebook_image_mean codebook_image_std = codebook_image_std if codebook_image_std is not None else self.codebook_image_std images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') processed_images = [self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_map_pixels=False, data_format=data_format, input_data_format=input_data_format) for img in images] data = {'pixel_values': processed_images} if return_codebook_pixels: codebook_images = [self._preprocess_image(image=img, do_resize=codebook_do_resize, size=codebook_size, resample=codebook_resample, do_center_crop=codebook_do_center_crop, crop_size=codebook_crop_size, do_rescale=codebook_do_rescale, rescale_factor=codebook_rescale_factor, do_normalize=codebook_do_normalize, image_mean=codebook_image_mean, image_std=codebook_image_std, do_map_pixels=codebook_do_map_pixels, data_format=data_format, input_data_format=input_data_format) for img in images] data['codebook_pixel_values'] = codebook_images if return_image_mask: mask_generator = self.masking_generator(input_size_patches=input_size_patches, total_mask_patches=total_mask_patches, mask_group_min_patches=mask_group_min_patches, mask_group_max_patches=mask_group_max_patches, mask_group_min_aspect_ratio=mask_group_min_aspect_ratio, mask_group_max_aspect_ratio=mask_group_max_aspect_ratio) masks = [mask_generator() for _ in images] data['bool_masked_pos'] = masks return BatchFeature(data=data, tensor_type=return_tensors) # File: transformers-main/src/transformers/models/flava/modeling_flava.py """""" import collections import math from collections import OrderedDict from dataclasses import dataclass from typing import Any, Dict, List, Optional, Set, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int from .configuration_flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'facebook/flava-full' _CHECKPOINT_FOR_CODEBOOK_DOC = 'facebook/flava-image-codebook' _CONFIG_CLASS_FOR_IMAGE_MODEL_DOC = 'FlavaImageConfig' _CONFIG_CLASS_FOR_TEXT_MODEL_DOC = 'FlavaTextConfig' _CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC = 'FlavaMultimodalConfig' _EXPECTED_IMAGE_OUTPUT_SHAPE = [1, 197, 768] LOGIT_SCALE_CLAMP_MIN = 0 LOGIT_SCALE_CLAMP_MAX = 4.6052 FlavaPossibleConfigs = Union[FlavaTextConfig, FlavaImageConfig, FlavaMultimodalConfig] @dataclass class FlavaModelOutput(ModelOutput): image_embeddings: Optional[torch.FloatTensor] = None image_output: Optional[BaseModelOutputWithPooling] = None text_embeddings: Optional[torch.FloatTensor] = None text_output: Optional[BaseModelOutputWithPooling] = None multimodal_embeddings: Optional[torch.FloatTensor] = None multimodal_output: Optional[BaseModelOutputWithPooling] = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_output', 'image_output', 'multimodal_output'] else getattr(self, k).to_tuple() for k in self.keys())) @dataclass class FlavaLosses(ModelOutput): mim: Optional[torch.FloatTensor] = None mlm: 
Optional[torch.FloatTensor] = None itm: Optional[torch.FloatTensor] = None global_contrastive: Optional[torch.FloatTensor] = None mmm_image: Optional[torch.FloatTensor] = None mmm_text: Optional[torch.FloatTensor] = None def all_none(self) -> bool: all_none = True for v in self.values(): if v is not None: all_none = False break return all_none @dataclass class FlavaForPreTrainingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None loss_info: FlavaLosses = None image_embeddings: Optional[torch.FloatTensor] = None image_output: Optional[BaseModelOutputWithPooling] = None text_embeddings: Optional[torch.FloatTensor] = None text_output: Optional[BaseModelOutputWithPooling] = None multimodal_embeddings: Optional[torch.FloatTensor] = None multimodal_output: Optional[BaseModelOutputWithPooling] = None image_masked_embeddings: Optional[torch.FloatTensor] = None image_masked_output: Optional[BaseModelOutputWithPooling] = None text_masked_embeddings: Optional[torch.FloatTensor] = None text_masked_output: Optional[BaseModelOutputWithPooling] = None multimodal_masked_embeddings: Optional[torch.FloatTensor] = None multimodal_masked_output: Optional[BaseModelOutputWithPooling] = None mim_logits: Optional[torch.FloatTensor] = None mlm_logits: Optional[torch.FloatTensor] = None itm_logits: Optional[torch.FloatTensor] = None contrastive_logits_per_image: Optional[torch.FloatTensor] = None contrastive_logits_per_text: Optional[torch.FloatTensor] = None mmm_image_logits: Optional[torch.FloatTensor] = None mmm_text_logits: Optional[torch.FloatTensor] = None def to_tuple(self) -> Tuple[Any]: transformer_outputs = ['text_output', 'image_output', 'multimodal_output', 'text_masked_output', 'image_masked_output', 'multimodal_masked_output'] return tuple((self[k] if k not in transformer_outputs else getattr(self, k).to_tuple() for k in self.keys())) class FlavaImageEmbeddings(nn.Module): def __init__(self, config: FlavaImageConfig, use_mask_token: bool=False) -> None: super().__init__() use_mask_token = use_mask_token or config.mask_token self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None self.patch_embeddings = PatchEmbeddings(image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.hidden_size) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.patch_size = config.patch_size self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) 
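# At this point `patch_pos_embed` holds the pre-trained grid of patch position embeddings
# (sqrt_num_positions x sqrt_num_positions), bicubically resampled to the new_height x new_width
# patch grid implied by the input resolution and flattened back to (1, new_height * new_width, dim);
# the class-token embedding is re-attached unchanged in front of it in the `torch.cat` below.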
return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> torch.Tensor: (batch_size, num_channels, height, width) = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) (batch_size, seq_len, _) = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) if bool_masked_pos.dim() == 3: bool_masked_pos = bool_masked_pos.view(bool_masked_pos.size(0), -1) mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings class PatchEmbeddings(nn.Module): def __init__(self, image_size: int=224, patch_size: Union[int, Tuple[int, int]]=16, num_channels: int=3, embed_dim: int=768): super().__init__() if not isinstance(image_size, collections.abc.Iterable): image_size = (image_size, image_size) if not isinstance(patch_size, collections.abc.Iterable): patch_size = (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: (batch_size, num_channels, height, width) = pixel_values.shape if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).") x = self.projection(pixel_values).flatten(2).transpose(1, 2) return x class FlavaTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None): input_shape = input_ids.size() seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = 
torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class FlavaSelfAttention(nn.Module): def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size {(config.hidden_size,)} is not a multiple of the number of attention heads {config.num_attention_heads}.') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class FlavaSelfOutput(nn.Module): def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class FlavaAttention(nn.Module): def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() self.attention = FlavaSelfAttention(config) self.output = FlavaSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: Set[int]) -> None: if len(heads) == 0: return (heads, index) = 
find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class FlavaIntermediate(nn.Module): def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class FlavaOutput(nn.Module): def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class FlavaLayer(nn.Module): def __init__(self, config: FlavaPossibleConfigs) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = FlavaAttention(config) self.intermediate = FlavaIntermediate(config) self.output = FlavaOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: self_attention_outputs = self.attention(self.layernorm_before(hidden_states), attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] hidden_states = attention_output + hidden_states layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs class FlavaEncoder(nn.Module): def __init__(self, config: FlavaConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([FlavaLayer(config) for _ in range(config.num_hidden_layers)]) 
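# Note: FlavaEncoder is a plain pre-LayerNorm transformer stack of FlavaLayer blocks; the same
# encoder class is reused by the image, text and multimodal towers defined further below, which
# differ only in their embeddings and configuration. The `gradient_checkpointing` flag set next
# is flipped when checkpointing is enabled on the wrapping PreTrainedModel, recomputing layer
# activations during the backward pass to save memory.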
self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class FlavaPooler(nn.Module): def __init__(self, config: FlavaPossibleConfigs): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor): first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output FLAVA_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`{config}`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' FLAVA_INPUTS_DOCSTRING_COMMON = '\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' FLAVA_IMAGE_INPUTS_DOCSTRING_BASE = "\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. 
Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`FlavaImageProcessor.__call__`] for details.\n\n bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):\n Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).\n\n interpolate_pos_encoding (`bool`, *optional*):\n Whether to interpolate the pre-trained position encodings.\n" FLAVA_IMAGE_INPUTS_DOCSTRING = FLAVA_IMAGE_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON FLAVA_TEXT_INPUTS_DOCSTRING_BASE = '\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See\n [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input\n IDs?](../glossary#input-ids)\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n [What are token type IDs?](../glossary#token-type-ids)\n' FLAVA_TEXT_INPUTS_DOCSTRING = FLAVA_TEXT_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON FLAVA_MULTIMODAL_INPUTS_DOCSTRING = '\n Args:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, image_num_patches + text_seq_len, hidden_size)`):\n The concatenated hidden states of unimodal encoders.\n' + FLAVA_INPUTS_DOCSTRING_COMMON FLAVA_MODEL_INPUTS_DOCSTRING_BASE = '\n Args:\n skip_multimodal_encoder (*bool*, *optional*):\n Skip any calculations for multimodal encoder. Useful if multimodal encoding is not going to be used.\n' FLAVA_MODEL_INPUTS_DOCSTRING = FLAVA_IMAGE_INPUTS_DOCSTRING_BASE + FLAVA_TEXT_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON + FLAVA_MODEL_INPUTS_DOCSTRING_BASE FLAVA_PRETRAINING_INPUTS_DOCSTRING = '\n Args:\n input_ids_masked (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary. These ones are the masked version of the original task\n to be used with MLM. Indices can be obtained using [`AutoTokenizer`] along with\n [`DataCollatorForMaskedLanguageModeling`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)\n\n' + FLAVA_TEXT_INPUTS_DOCSTRING_BASE + FLAVA_IMAGE_INPUTS_DOCSTRING_BASE + "\n image_attention_mask (`torch.FloatTensor` of shape `({1})`, *optional*):\n Mask to avoid performing attention on padding token indices specifically for images. Mask values selected\n in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n\n skip_unmasked_multimodal_encoder (*bool*, *optional*):\n Skip any calculations for multimodal encoder for unmasked inputs. FLAVA pretraining doesn't need unmasked\n multimodal embeddings or outputs as of now.\n\n mlm_labels (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*):\n Labels for computing the left-to-right language and multimodal masked modeling loss (next word prediction).\n Indices should be in `[-100, 0, ..., text_config.vocab_size - 1]` (see `input_ids` docstring). 
Tokens with\n indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0,\n ..., text_config.vocab_size - 1]`.\n\n mim_labels (`torch.LongTensor` of shape `(batch_size, image_num_patches)`, *optional*):\n Labels for computing the image and multimodal masked modeling loss. Indices should be in `[-100, 0, ...,\n image_config.vocab_size - 1]`. Tokens with indices set to `-100` are ignored (masked), the loss is only\n computed for the tokens with labels in `[0, ..., image_config.vocab_size - 1]`. If not passed, they are\n generated automatically using the image codebook assigned to the model. By default, it uses\n [`FlavaImageCodebook`]. See [`FlavaImageCodebook`] to understand how to generate mim_labels.\n\n itm_labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):\n Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.\n The pairs with 0 will be skipped for calculation of MMM and global contrastive losses as well.\n\n return_loss (`bool`, *optional*, default to None):\n Whether to return calculated loss or not.\n" + FLAVA_INPUTS_DOCSTRING_COMMON FLAVA_PRETRAINING_START_DOCSTRING_EXTRA = '\n Parameters:\n image_codebook ([`nn.Module`]): If passed, the image codebook will be set to this. Otherwise. it will\n be initialized using the image_codebook_config defined in the config first as the first parameter.\n' class FlavaPreTrainedModel(PreTrainedModel): config_class = FlavaConfig base_model_prefix = 'flava' supports_gradient_checkpointing = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @add_start_docstrings('The bare FLAVA Image Model transformer outputting raw hidden-states without any specific head on top.', FLAVA_START_DOCSTRING.format(config='FlavaImageConfig')) class FlavaImageModel(FlavaPreTrainedModel): config_class = FlavaImageConfig base_model_prefix = 'flava.image_model' main_input_name = 'pixel_values' def __init__(self, config: FlavaImageConfig, add_pooling_layer: bool=True): super().__init__(config) self.config = config self.embeddings = FlavaImageEmbeddings(config) self.encoder = FlavaEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = FlavaPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self) -> nn.Module: return self.embeddings.patch_embeddings def set_input_embeddings(self, value: nn.Module): self.embeddings.patch_embeddings = value def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format('batch_size, image_num_patches')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_CLASS_FOR_IMAGE_MODEL_DOC, modality='vision', expected_output=_EXPECTED_IMAGE_OUTPUT_SHAPE) def forward(self, pixel_values: Optional[torch.Tensor]=None, 
bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: Optional[bool]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding) encoder_outputs = self.encoder(embedding_output, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('The bare FLAVA Text Model transformer outputting raw hidden-states without any specific head on top.', FLAVA_START_DOCSTRING.format(config='FlavaTextConfig')) class FlavaTextModel(FlavaPreTrainedModel): config_class = FlavaTextConfig base_model_prefix = 'flava.text_model' def __init__(self, config: FlavaTextConfig, add_pooling_layer: bool=True): super().__init__(config) self.config = config self.embeddings = FlavaTextEmbeddings(config) self.encoder = FlavaEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = FlavaPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self) -> PatchEmbeddings: return self.embeddings.word_embeddings def set_input_embeddings(self, value: nn.Module): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format('batch_size, text_seq_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_CLASS_FOR_TEXT_MODEL_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids 
is None: raise ValueError('You have to specify input_ids') input_shape = input_ids.size() if attention_mask is None: attention_mask = torch.ones(input_shape, device=input_ids.device) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, input_ids.device) embedding_output = self.embeddings(input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('The bare FLAVA Multimodal Model transformer outputting raw hidden-states without any specific head on top.', FLAVA_START_DOCSTRING.format(config='FlavaMultimodalConfig')) class FlavaMultimodalModel(FlavaPreTrainedModel): config_class = FlavaMultimodalConfig base_model_prefix = 'flava.multimodal_model' main_input_name = 'hidden_states' def __init__(self, config: FlavaMultimodalConfig, add_pooling_layer=True): super().__init__(config) self.config = config self.use_cls_token = self.config.use_cls_token if self.use_cls_token: self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.encoder = FlavaEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = FlavaPooler(config) if add_pooling_layer else None self.post_init() def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(FLAVA_MULTIMODAL_INPUTS_DOCSTRING.format('batch_size, image_num_patches + text_seq_len')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict (batch_size, seq_length, _) = hidden_states.size() if self.use_cls_token: cls_tokens = self.cls_token.expand(batch_size, -1, -1) hidden_states = torch.cat((cls_tokens, hidden_states), dim=1) seq_length += 1 if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=hidden_states.device) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, (batch_size, seq_length), hidden_states.device) encoder_outputs = 
self.encoder(hidden_states, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('The bare FLAVA Model transformer outputting raw hidden-states without any specific head on top.', FLAVA_START_DOCSTRING.format(config='FlavaConfig')) class FlavaModel(FlavaPreTrainedModel): config_class = FlavaConfig def __init__(self, config: FlavaConfig): super().__init__(config) if not isinstance(config.text_config, FlavaTextConfig): raise TypeError(f'config.text_config is expected to be of type FlavaTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.image_config, FlavaImageConfig): raise TypeError(f'config.image_config is expected to be of type FlavaImageConfig but is of type {type(config.image_config)}.') if not isinstance(config.multimodal_config, FlavaMultimodalConfig): raise TypeError('config.multimodal_config is expected to be of type FlavaMultimodalConfig but ' + f'is of type {type(config.multimodal_config)}.') text_config = config.text_config image_config = config.image_config multimodal_config = config.multimodal_config self.projection_dim = config.projection_dim self.text_hidden_size = text_config.hidden_size self.image_hidden_size = image_config.hidden_size self.mm_hidden_size = multimodal_config.hidden_size self.text_model = FlavaTextModel(text_config) self.image_model = FlavaImageModel(image_config) self.multimodal_model = FlavaMultimodalModel(multimodal_config) self.image_projection = nn.Linear(self.image_hidden_size, self.projection_dim) self.text_projection = nn.Linear(self.text_hidden_size, self.projection_dim) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.image_to_mm_projection = nn.Linear(self.image_hidden_size, self.mm_hidden_size) self.text_to_mm_projection = nn.Linear(self.text_hidden_size, self.mm_hidden_size) self.post_init() @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format('batch_size, text_seq_length')) def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: '\n Returns:\n text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by\n applying the projection layer to the pooled output of [`FlavaTextModel`].\n\n Examples:\n\n ```python\n >>> from transformers import AutoProcessor, FlavaModel\n\n >>> model = FlavaModel.from_pretrained("{0}")\n >>> processor = AutoProcessor.from_pretrained("{0}")\n\n >>> inputs = processor(\n ... text=["a photo of a cat", "a photo of a dog"], max_length=77, padding="max_length", return_tensors="pt"\n ... 
)\n >>> text_features = model.get_text_features(**inputs)\n ```'.format(_CHECKPOINT_FOR_DOC) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = text_outputs[0] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format('batch_size, image_num_patches')) def get_image_features(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: Optional[bool]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: '\n Returns:\n image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by\n applying the projection layer to the pooled output of [`FlavaImageModel`].\n\n Examples:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoProcessor, FlavaModel\n\n >>> model = FlavaModel.from_pretrained("{0}")\n >>> processor = AutoProcessor.from_pretrained("{0}")\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = processor(images=image, return_tensors="pt")\n\n >>> image_features = model.get_image_features(**inputs)\n ```'.format(_CHECKPOINT_FOR_DOC) image_outputs = self.image_model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict) pooled_output = image_outputs[0] image_features = self.image_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(FLAVA_MODEL_INPUTS_DOCSTRING.format('batch_size, image_num_patches + text_seq_len')) @replace_return_docstrings(output_type=FlavaModelOutput, config_class=FlavaConfig) def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, skip_multimodal_encoder: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: bool=True, return_dict: Optional[bool]=None) -> Union[Tuple, FlavaOutput]: return_dict = return_dict if return_dict is not None else self.config.return_dict if not output_hidden_states: raise ValueError('FLAVA model requires hidden states to work. 
Please set `output_hidden_states=True`') image_embeddings = None image_states = None image_mm_projection = None image_output = None if pixel_values is not None: image_output = self.image_model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos, attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) (image_embeddings, image_states) = (image_output[0], image_output[2]) image_mm_projection = self.image_to_mm_projection(image_states[-1]) text_embeddings = None text_states = None text_mm_projection = None text_output = None if input_ids is not None: text_output = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) (text_embeddings, text_states) = (text_output[0], text_output[2]) text_mm_projection = self.text_to_mm_projection(text_states[-1]) multimodal_embeddings = None multimodal_output = None if image_mm_projection is not None and text_mm_projection is not None and (not skip_multimodal_encoder): if attention_mask is not None: (batch_size, seq_len, _) = image_mm_projection.shape if self.multimodal_model.use_cls_token: seq_len += 1 attention_mask_image = torch.ones(batch_size, seq_len, device=image_mm_projection.device) attention_multimodal = torch.cat([attention_mask_image, attention_mask], dim=1) else: attention_multimodal = None multimodal_input = torch.cat([image_mm_projection, text_mm_projection], dim=1) multimodal_output = self.multimodal_model(multimodal_input, attention_mask=attention_multimodal, return_dict=return_dict) multimodal_embeddings = multimodal_output[0] if not return_dict: return (image_embeddings, image_output, text_embeddings, text_output, multimodal_embeddings, multimodal_output) return FlavaModelOutput(image_embeddings=image_embeddings, image_output=image_output, text_embeddings=text_embeddings, text_output=text_output, multimodal_embeddings=multimodal_embeddings, multimodal_output=multimodal_output) class FlavaImageCodebookResPath(nn.Module): def __init__(self, in_size: int, out_size: int, **kwargs): super().__init__() hid_size = out_size // 4 path = OrderedDict() path['relu_1'] = nn.ReLU() path['conv_1'] = nn.Conv2d(in_size, hid_size, kernel_size=3, padding=1) path['relu_2'] = nn.ReLU() path['conv_2'] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1) path['relu_3'] = nn.ReLU() path['conv_3'] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1) path['relu_4'] = nn.ReLU() path['conv_4'] = nn.Conv2d(hid_size, out_size, kernel_size=1, padding=0) self.path = nn.Sequential(path) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.path(x) class FlavaImageCodebookBlock(nn.Module): def __init__(self, in_size: int, out_size: int, num_layers: int, **kwargs): super().__init__() self.post_gain = 1 / num_layers ** 2 if in_size != out_size: self.id_path = nn.Conv2d(in_size, out_size, kernel_size=1, padding=0) else: self.id_path = nn.Identity() self.res_path = FlavaImageCodebookResPath(in_size, out_size) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.id_path(x) + self.post_gain * self.res_path(x) class FlavaImageCodebookLayerGroup(nn.Module): def __init__(self, num_blocks: int, num_layers: int, in_size: int, out_size: int, use_pool: bool=True): super().__init__() blocks = OrderedDict() for i in range(num_blocks): if i == 0: blocks[f'block_{i + 1}'] = 
FlavaImageCodebookBlock(in_size, out_size, num_layers) else: blocks[f'block_{i + 1}'] = FlavaImageCodebookBlock(out_size, out_size, num_layers) if use_pool: blocks['pool'] = nn.MaxPool2d(kernel_size=2) self.group = nn.Sequential(blocks) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.group(x) @add_start_docstrings("\n The FLAVA's image codebook model inspired from DALL-E's original encoder. Outputs raw hidden states and can be used\n to generate image tokens for an image based on DALL-E's vocab. Used to generate labels for MIM. Use\n `get_codebook_indices` to get image tokens for an image.\n ", FLAVA_START_DOCSTRING.format(config='FlavaImageCodebookConfig')) class FlavaImageCodebook(FlavaPreTrainedModel): base_model_prefix = '' config_class = FlavaImageCodebookConfig main_input_name = 'pixel_values' supports_gradient_checkpointing = False def __init__(self, config: FlavaImageCodebookConfig, **kwargs: Any): super().__init__(config) self.config = config self.num_groups = config.num_groups self.input_channels = config.input_channels self.num_blocks_per_group = config.num_blocks_per_group self.hidden_size = config.hidden_size self.vocab_size = config.vocab_size num_layers = self.num_groups * self.num_blocks_per_group output_blocks = OrderedDict() output_blocks['relu'] = nn.ReLU() output_blocks['conv'] = nn.Conv2d(8 * self.hidden_size, self.vocab_size, kernel_size=1, padding=0) blocks = OrderedDict() blocks['input'] = nn.Conv2d(self.input_channels, 1 * self.hidden_size, kernel_size=7, padding=3) blocks['group_1'] = FlavaImageCodebookLayerGroup(self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 1 * self.hidden_size) blocks['group_2'] = FlavaImageCodebookLayerGroup(self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 2 * self.hidden_size) blocks['group_3'] = FlavaImageCodebookLayerGroup(self.num_blocks_per_group, num_layers, 2 * self.hidden_size, 4 * self.hidden_size) blocks['group_4'] = FlavaImageCodebookLayerGroup(self.num_blocks_per_group, num_layers, 4 * self.hidden_size, 8 * self.hidden_size, use_pool=False) blocks['output'] = nn.Sequential(output_blocks) self.blocks = nn.Sequential(blocks) self.post_init() if self.config.freeze: for param in self.parameters(): param.requires_grad = False def get_codebook_indices(self, pixel_values: torch.Tensor) -> torch.Tensor: '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing\n `return_codebook_pixels=True`. 
See [`FlavaImageProcessor.__call__`] for details.\n\n Examples:\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoImageProcessor, FlavaImageCodebook\n\n >>> model = FlavaImageCodebook.from_pretrained("{0}")\n >>> image_processor = AutoImageProcessor.from_pretrained("{0}")\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")\n >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)\n\n >>> outputs = model.get_codebook_indices(**inputs)\n ```\n '.format(_CHECKPOINT_FOR_CODEBOOK_DOC) z_logits = self.blocks(pixel_values) return torch.argmax(z_logits, axis=1) def get_codebook_probs(self, pixel_values: torch.Tensor) -> torch.Tensor: z_logits = self.blocks(pixel_values) return nn.Softmax(dim=1)(z_logits) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing\n `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.\n\n Examples:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoImageProcessor, FlavaImageCodebook\n\n >>> model = FlavaImageCodebook.from_pretrained("{0}")\n >>> image_processor = AutoImageProcessor.from_pretrained("{0}")\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")\n >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)\n\n >>> outputs = model(**inputs)\n >>> print(outputs.shape)\n (1, 196)\n ```\n '.format(_CHECKPOINT_FOR_CODEBOOK_DOC) if len(pixel_values.shape) != 4: raise ValueError(f'input shape {pixel_values.shape} is not 4d') if pixel_values.shape[1] != self.input_channels: raise ValueError(f'input has {pixel_values.shape[1]} channels but model built for {self.input_channels}') return self.blocks(pixel_values) class FlavaPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class FlavaMaskedPredictionHead(nn.Module): def __init__(self, config, weight=None): super().__init__() self.config = config self.transform = FlavaPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) if weight is not None: self.decoder.weight = weight self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, x): x = self.transform(x) x = self.decoder(x) return x class FlavaITMHead(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pooler = FlavaPooler(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, x): x = 
self.pooler(x) x = self.seq_relationship(x) return x class FlavaGlobalContrastiveHead(nn.Module): def __init__(self, config): super().__init__() self.config = config self.global_backprop_contrastive = config.global_backprop_contrastive def forward(self, image_embeddings, text_embeddings, logit_scale): temperature = torch.exp(logit_scale) if not torch.distributed.is_available() or not torch.distributed.is_initialized(): labels = torch.arange(image_embeddings.size(0), device=image_embeddings.device) image_embeddings_all = [image_embeddings] text_embeddings_all = [text_embeddings] else: local_batch_size = image_embeddings.size(0) world_size = torch.distributed.get_world_size() if self.global_backprop_contrastive: image_embeddings_all = torch.distributed.nn.functional.all_gather(image_embeddings) text_embeddings_all = torch.distributed.nn.functional.all_gather(text_embeddings) else: image_embeddings_all = [torch.zeros_like(text_embeddings) for _ in range(world_size)] text_embeddings_all = [torch.zeros_like(image_embeddings) for _ in range(world_size)] torch.distributed.all_gather(image_embeddings_all, image_embeddings) torch.distributed.all_gather(text_embeddings_all, text_embeddings) labels = local_batch_size * torch.distributed.get_rank() + torch.arange(local_batch_size, device=image_embeddings.device) image_embeddings_all = torch.cat(image_embeddings_all) text_embeddings_all = torch.cat(text_embeddings_all) logits_per_image = torch.matmul(image_embeddings, text_embeddings_all.transpose(0, 1)) * temperature logits_per_text = torch.matmul(text_embeddings, image_embeddings_all.transpose(0, 1)) * temperature return (logits_per_image, logits_per_text, labels) @add_start_docstrings('\n The FLAVA model for pretraining which outputs losses, embeddings, logits and transformer outputs.\n ', FLAVA_START_DOCSTRING.format(config='FlavaConfig') + FLAVA_PRETRAINING_START_DOCSTRING_EXTRA) class FlavaForPreTraining(FlavaPreTrainedModel): _tied_weights_keys = ['mmm_text_head.decoder.bias', 'mmm_image_head.decoder.bias', 'mlm_head.decoder.bias', 'mim_head.decoder.bias'] def __init__(self, config: FlavaConfig, image_codebook: Optional[nn.Module]=None): super().__init__(config) self.flava = FlavaModel(config) self.image_codebook = image_codebook if self.image_codebook is None and config.init_codebook: self.image_codebook = FlavaImageCodebook(config.image_codebook_config) self.mim_head = FlavaMaskedPredictionHead(config.image_config) self.mlm_head = FlavaMaskedPredictionHead(config.text_config) self.itm_head = FlavaITMHead(config) self.mmm_image_head = FlavaMaskedPredictionHead(config.image_config) self.mmm_text_head = FlavaMaskedPredictionHead(config.text_config) self.global_contrastive_head = FlavaGlobalContrastiveHead(config) self.image_vocab_size = config.image_config.vocab_size self.text_vocab_size = config.text_config.vocab_size self.mlm_weight = config.mlm_weight self.mim_weight = config.mim_weight self.global_contrastive_weight = config.global_contrastive_weight self.ce_ignore_index = config.ce_ignore_index self.itm_weight = config.itm_weight self.mmm_image_weight = config.mmm_image_weight self.mmm_text_weight = config.mmm_text_weight self.skip_unmasked_multimodal_encoder = config.skip_unmasked_multimodal_encoder self.post_init() def _resize_to_2d(self, x: torch.Tensor): if x.dim() > 2: x = x.view(x.size(0), -1) return x @add_start_docstrings_to_model_forward(FLAVA_PRETRAINING_INPUTS_DOCSTRING.format('batch_size, text_seq_len', 'batch_size, image_num_patches')) 
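# The pretraining forward pass below runs the backbone twice: the unmasked pass (`input_ids`, `pixel_values`) feeds the global contrastive objective, while the masked pass (`input_ids_masked`, `bool_masked_pos`, optionally `codebook_pixel_values`) feeds the MIM/MLM/ITM/MMM objectives. When `return_loss` is set and `mim_labels` is not given, MIM targets are derived from `codebook_pixel_values` via the image codebook, and each individual loss is only computed when its corresponding `*_weight` in the config is greater than zero.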
@replace_return_docstrings(output_type=FlavaForPreTrainingOutput, config_class=FlavaConfig) def forward(self, input_ids: Optional[torch.LongTensor]=None, input_ids_masked: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, codebook_pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, skip_unmasked_multimodal_encoder: bool=None, mlm_labels: Optional[torch.Tensor]=None, mim_labels: Optional[torch.Tensor]=None, itm_labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: bool=True, return_dict: Optional[bool]=None, return_loss: Optional[bool]=None) -> Union[Tuple[torch.Tensor], FlavaForPreTrainingOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict return_loss = return_loss if return_loss is not None else self.config.return_loss skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder if skip_unmasked_multimodal_encoder is not None else self.skip_unmasked_multimodal_encoder if input_ids_masked is None and input_ids is not None: logger.warning("`input_ids_masked` isn't passed which means MLM loss won't be calculated correctly. Setting it to `input_ids` so that the model can work. Please pass it if this is unintentional. This is usually OKAY if you are doing inference on unmasked text...") input_ids_masked = input_ids flava_output = self.flava(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, image_attention_mask=image_attention_mask, skip_multimodal_encoder=skip_unmasked_multimodal_encoder, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True) flava_masked_output = self.flava(input_ids=input_ids_masked, pixel_values=pixel_values, attention_mask=attention_mask, token_type_ids=token_type_ids, image_attention_mask=image_attention_mask, bool_masked_pos=bool_masked_pos, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True) pos_mask = None image_embeddings = flava_output.image_embeddings text_embeddings = flava_output.text_embeddings image_masked_embeddings = flava_masked_output.image_embeddings text_masked_embeddings = flava_masked_output.text_embeddings multimodal_masked_embeddings = flava_masked_output.multimodal_embeddings total_loss = mim_loss = mlm_loss = mmm_text_loss = mmm_image_loss = gc_loss = itm_loss = None mim_logits = mlm_logits = mmm_text_logits = mmm_image_logits = None itm_logits = logits_per_image = logits_per_text = None if image_masked_embeddings is not None or multimodal_masked_embeddings is not None: if mim_labels is None and return_loss: if self.image_codebook is None: raise RuntimeError('`return_loss` is set to True but the image codebook is not initialized and no `mim_labels` have been passed. Reinstantiate the model with `init_codebook` set to True or pass in your custom `mim_labels`') if codebook_pixel_values is None: raise ValueError('`codebook_pixel_values` are required to generate `mim_labels` if loss is expected. 
Call `AutoProcessor` with `return_codebook_pixels` set to True') mim_labels = self.image_codebook.get_codebook_indices(codebook_pixel_values) if self.mim_weight > 0 and image_masked_embeddings is not None and (multimodal_masked_embeddings is None): sequence_for_image = image_masked_embeddings if mim_labels is not None: mim_labels = self._resize_to_2d(mim_labels) bool_masked_pos = self._resize_to_2d(bool_masked_pos) mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index sequence_for_image = sequence_for_image[:, -mim_labels.size(1):, :] masked_tokens = mim_labels.ne(self.ce_ignore_index) mim_labels_filtered = mim_labels[masked_tokens] sequence_for_image = sequence_for_image[masked_tokens, :] mim_logits = self.mim_head(sequence_for_image) if return_loss: mim_loss = nn.functional.cross_entropy(mim_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)) mim_loss *= self.mim_weight else: mim_logits = self.mim_head(sequence_for_image) if self.mlm_weight > 0 and text_masked_embeddings is not None and (multimodal_masked_embeddings is None): sequence_for_text = text_masked_embeddings if mlm_labels is not None: mlm_labels = self._resize_to_2d(mlm_labels) sequence_for_text = sequence_for_text[:, -mlm_labels.size(1):, :] masked_tokens = mlm_labels.ne(self.ce_ignore_index) mlm_labels_filtered = mlm_labels[masked_tokens] sequence_for_text = sequence_for_text[masked_tokens, :] mlm_logits = self.mlm_head(sequence_for_text) if return_loss: mlm_loss = nn.functional.cross_entropy(mlm_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)) mlm_loss *= self.mlm_weight else: mlm_logits = self.mlm_head(sequence_for_text) if self.itm_weight > 0 and multimodal_masked_embeddings is not None: itm_logits = self.itm_head(multimodal_masked_embeddings) if itm_labels is not None: pos_pairs = itm_labels.ne(0) pos_mask = torch.where(pos_pairs.any(), pos_pairs, pos_pairs.new([True])) if return_loss: itm_loss = nn.functional.cross_entropy(itm_logits, itm_labels) itm_loss *= self.itm_weight if multimodal_masked_embeddings is not None: multimodal_masked_embeddings = multimodal_masked_embeddings[pos_mask] if mlm_labels is not None: mlm_labels = mlm_labels[pos_mask] if mim_labels is not None: mim_labels = mim_labels[pos_mask] bool_masked_pos = bool_masked_pos[pos_mask] if multimodal_masked_embeddings is not None and self.mmm_image_weight > 0: sequence_for_image = multimodal_masked_embeddings end_index = image_masked_embeddings.size(1) - 1 sequence_for_image = sequence_for_image[:, 2:2 + end_index, :] if mim_labels is not None: mim_labels = self._resize_to_2d(mim_labels) bool_masked_pos = self._resize_to_2d(bool_masked_pos) mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index masked_tokens = mim_labels.ne(self.ce_ignore_index) mim_labels_filtered = mim_labels[masked_tokens] sequence_for_image = sequence_for_image[masked_tokens, :] mmm_image_logits = self.mmm_image_head(sequence_for_image) if return_loss: mmm_image_loss = nn.functional.cross_entropy(mmm_image_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)) mmm_image_loss *= self.mmm_image_weight else: mmm_image_logits = self.mmm_image_head(sequence_for_image) if multimodal_masked_embeddings is not None and self.mmm_text_weight > 0: sequence_for_text = multimodal_masked_embeddings sequence_for_text = sequence_for_text[:, -text_masked_embeddings.size(1):, :] if mlm_labels is not None: mlm_labels = self._resize_to_2d(mlm_labels) masked_tokens = mlm_labels.ne(self.ce_ignore_index) mlm_labels_filtered = 
mlm_labels[masked_tokens] sequence_for_text = sequence_for_text[masked_tokens, :] mmm_text_logits = self.mmm_text_head(sequence_for_text) if return_loss: mmm_text_loss = nn.functional.cross_entropy(mmm_text_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)) mmm_text_loss *= self.mmm_text_weight else: mmm_text_logits = self.mmm_text_head(sequence_for_text) if image_embeddings is not None and text_embeddings is not None and (self.global_contrastive_weight > 0): text_embedding = self.flava.text_projection(text_embeddings[:, 0, :]) text_embedding = nn.functional.normalize(text_embedding, dim=-1) image_embedding = self.flava.image_projection(image_embeddings[:, 0, :]) image_embedding = nn.functional.normalize(image_embedding, dim=-1) self.flava.logit_scale.data.clamp_(LOGIT_SCALE_CLAMP_MIN, LOGIT_SCALE_CLAMP_MAX) (logits_per_image, logits_per_text, gc_labels) = self.global_contrastive_head(image_embedding, text_embedding, self.flava.logit_scale) if pos_mask is not None: logits_per_image = logits_per_image[pos_mask] logits_per_text = logits_per_text[pos_mask] gc_labels = gc_labels[pos_mask] if return_loss: gc_loss_image = nn.functional.cross_entropy(logits_per_image, gc_labels) gc_loss_text = nn.functional.cross_entropy(logits_per_text, gc_labels) gc_loss = (gc_loss_image + gc_loss_text) / 2 gc_loss *= self.global_contrastive_weight flava_losses = FlavaLosses(mim=mim_loss, mlm=mlm_loss, itm=itm_loss, global_contrastive=gc_loss, mmm_image=mmm_image_loss, mmm_text=mmm_text_loss) if return_loss and (not flava_losses.all_none()): total_loss = sum((loss if loss is not None else 0 for loss in flava_losses.values())) if not return_dict: output = (image_embeddings, flava_output.image_output.to_tuple() if flava_output.image_output is not None else None, text_embeddings, flava_output.text_output.to_tuple() if flava_output.text_output is not None else None, flava_output.multimodal_embeddings, flava_output.multimodal_output.to_tuple() if flava_output.multimodal_output is not None else None, image_masked_embeddings, flava_masked_output.image_output.to_tuple() if flava_masked_output.image_output is not None else None, text_masked_embeddings, flava_masked_output.text_output.to_tuple() if flava_masked_output.text_output is not None else None, multimodal_masked_embeddings, flava_masked_output.multimodal_output.to_tuple() if flava_masked_output.multimodal_output is not None else None, mim_logits, mlm_logits, itm_logits, logits_per_image, logits_per_text, mmm_image_logits, mmm_text_logits) if return_loss and (not flava_losses.all_none()): output = (total_loss, flava_losses) + output return tuple((x for x in output if x is not None)) return FlavaForPreTrainingOutput(loss=total_loss, loss_info=flava_losses, image_embeddings=image_embeddings, image_output=flava_output.image_output, text_embeddings=text_embeddings, text_output=flava_output.text_output, multimodal_embeddings=flava_output.multimodal_embeddings, multimodal_output=flava_output.multimodal_output, image_masked_embeddings=image_masked_embeddings, image_masked_output=flava_masked_output.image_output, text_masked_embeddings=text_masked_embeddings, text_masked_output=flava_masked_output.text_output, multimodal_masked_embeddings=multimodal_masked_embeddings, multimodal_masked_output=flava_masked_output.multimodal_output, mim_logits=mim_logits, mlm_logits=mlm_logits, itm_logits=itm_logits, contrastive_logits_per_image=logits_per_image, contrastive_logits_per_text=logits_per_text, mmm_image_logits=mmm_image_logits, mmm_text_logits=mmm_text_logits) # 
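# A minimal, illustrative usage sketch for `FlavaForPreTraining` (assumptions: the public
# `facebook/flava-full` checkpoint, and that `return_image_mask=True` / `return_codebook_pixels=True`
# yield `bool_masked_pos` / `codebook_pixel_values` as in the FLAVA processors). In real
# pretraining, `input_ids_masked` would come from a masking collator rather than being a
# plain copy of `input_ids`.
from PIL import Image
import requests
from transformers import FlavaProcessor, FlavaForPreTraining

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
model = FlavaForPreTraining.from_pretrained("facebook/flava-full")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=["a photo of two cats"],
    images=[image],
    return_image_mask=True,  # -> bool_masked_pos for masked image modeling
    return_codebook_pixels=True,  # -> codebook_pixel_values used to derive mim_labels
    return_tensors="pt",
)
inputs["input_ids_masked"] = inputs["input_ids"].clone()  # stand-in for properly masked ids
outputs = model(**inputs, return_loss=True)
print(outputs.loss, outputs.loss_info)  # total loss and per-objective FlavaLosses
# 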
File: transformers-main/src/transformers/models/flava/processing_flava.py """""" import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class FlavaProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] image_processor_class = 'FlavaImageProcessor' tokenizer_class = ('BertTokenizer', 'BertTokenizerFast') def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if 'feature_extractor' in kwargs: warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning) feature_extractor = kwargs.pop('feature_extractor') image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=False, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_image_mask: Optional[bool]=None, return_codebook_pixels: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs): if text is None and images is None: raise ValueError('You have to specify either text or images. 
Both cannot be none.') if text is not None: encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs) if images is not None: image_features = self.image_processor(images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs) if text is not None and images is not None: encoding.update(image_features) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def feature_extractor_class(self): warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning) return self.image_processor_class @property def feature_extractor(self): warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning) return self.image_processor # File: transformers-main/src/transformers/models/fnet/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_fnet': ['FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_fnet'] = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_fnet'] = ['FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel'] if TYPE_CHECKING: from .configuration_fnet import FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, 
FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/fnet/configuration_fnet.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class FNetConfig(PretrainedConfig): model_type = 'fnet' def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations self.tpu_short_seq_length = tpu_short_seq_length # File: transformers-main/src/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py """""" import argparse import torch from flax.training.checkpoints import restore_checkpoint from transformers import FNetConfig, FNetForPreTraining from transformers.utils import logging logging.set_verbosity_info() def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, fnet_config_file, save_path): config = FNetConfig.from_json_file(fnet_config_file) print(f'Building PyTorch model from configuration: {config}') fnet_pretraining_model = FNetForPreTraining(config) checkpoint_dict = restore_checkpoint(flax_checkpoint_path, None) pretrained_model_params = checkpoint_dict['target'] state_dict = fnet_pretraining_model.state_dict() position_ids = state_dict['fnet.embeddings.position_ids'] new_state_dict = {'fnet.embeddings.position_ids': position_ids} new_state_dict['fnet.embeddings.word_embeddings.weight'] = torch.tensor(pretrained_model_params['encoder']['embedder']['word']['embedding']) new_state_dict['fnet.embeddings.position_embeddings.weight'] = torch.tensor(pretrained_model_params['encoder']['embedder']['position']['embedding'][0]) new_state_dict['fnet.embeddings.token_type_embeddings.weight'] = torch.tensor(pretrained_model_params['encoder']['embedder']['type']['embedding']) new_state_dict['fnet.embeddings.projection.weight'] = torch.tensor(pretrained_model_params['encoder']['embedder']['hidden_mapping_in']['kernel']).T new_state_dict['fnet.embeddings.projection.bias'] = torch.tensor(pretrained_model_params['encoder']['embedder']['hidden_mapping_in']['bias']) new_state_dict['fnet.embeddings.LayerNorm.weight'] = torch.tensor(pretrained_model_params['encoder']['embedder']['layer_norm']['scale']) new_state_dict['fnet.embeddings.LayerNorm.bias'] = torch.tensor(pretrained_model_params['encoder']['embedder']['layer_norm']['bias']) for layer in range(config.num_hidden_layers): new_state_dict[f'fnet.encoder.layer.{layer}.fourier.output.LayerNorm.weight'] = 
torch.tensor(pretrained_model_params['encoder'][f'encoder_{layer}']['mixing_layer_norm']['scale']) new_state_dict[f'fnet.encoder.layer.{layer}.fourier.output.LayerNorm.bias'] = torch.tensor(pretrained_model_params['encoder'][f'encoder_{layer}']['mixing_layer_norm']['bias']) new_state_dict[f'fnet.encoder.layer.{layer}.intermediate.dense.weight'] = torch.tensor(pretrained_model_params['encoder'][f'feed_forward_{layer}']['intermediate']['kernel']).T new_state_dict[f'fnet.encoder.layer.{layer}.intermediate.dense.bias'] = torch.tensor(pretrained_model_params['encoder'][f'feed_forward_{layer}']['intermediate']['bias']) new_state_dict[f'fnet.encoder.layer.{layer}.output.dense.weight'] = torch.tensor(pretrained_model_params['encoder'][f'feed_forward_{layer}']['output']['kernel']).T new_state_dict[f'fnet.encoder.layer.{layer}.output.dense.bias'] = torch.tensor(pretrained_model_params['encoder'][f'feed_forward_{layer}']['output']['bias']) new_state_dict[f'fnet.encoder.layer.{layer}.output.LayerNorm.weight'] = torch.tensor(pretrained_model_params['encoder'][f'encoder_{layer}']['output_layer_norm']['scale']) new_state_dict[f'fnet.encoder.layer.{layer}.output.LayerNorm.bias'] = torch.tensor(pretrained_model_params['encoder'][f'encoder_{layer}']['output_layer_norm']['bias']) new_state_dict['fnet.pooler.dense.weight'] = torch.tensor(pretrained_model_params['encoder']['pooler']['kernel']).T new_state_dict['fnet.pooler.dense.bias'] = torch.tensor(pretrained_model_params['encoder']['pooler']['bias']) new_state_dict['cls.predictions.transform.dense.weight'] = torch.tensor(pretrained_model_params['predictions_dense']['kernel']).T new_state_dict['cls.predictions.transform.dense.bias'] = torch.tensor(pretrained_model_params['predictions_dense']['bias']) new_state_dict['cls.predictions.transform.LayerNorm.weight'] = torch.tensor(pretrained_model_params['predictions_layer_norm']['scale']) new_state_dict['cls.predictions.transform.LayerNorm.bias'] = torch.tensor(pretrained_model_params['predictions_layer_norm']['bias']) new_state_dict['cls.predictions.decoder.weight'] = torch.tensor(pretrained_model_params['encoder']['embedder']['word']['embedding']) new_state_dict['cls.predictions.decoder.bias'] = torch.tensor(pretrained_model_params['predictions_output']['output_bias']) new_state_dict['cls.predictions.bias'] = torch.tensor(pretrained_model_params['predictions_output']['output_bias']) new_state_dict['cls.seq_relationship.weight'] = torch.tensor(pretrained_model_params['classification']['output_kernel']) new_state_dict['cls.seq_relationship.bias'] = torch.tensor(pretrained_model_params['classification']['output_bias']) fnet_pretraining_model.load_state_dict(new_state_dict) print(f'Saving pretrained model to {save_path}') fnet_pretraining_model.save_pretrained(save_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--flax_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.') parser.add_argument('--fnet_config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained FNet model. 
\nThis specifies the model architecture.') parser.add_argument('--save_path', default=None, type=str, required=True, help='Path to the output model.') args = parser.parse_args() convert_flax_checkpoint_to_pytorch(args.flax_checkpoint_path, args.fnet_config_file, args.save_path) # File: transformers-main/src/transformers/models/fnet/modeling_fnet.py """""" import warnings from dataclasses import dataclass from functools import partial from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...utils import is_scipy_available if is_scipy_available(): from scipy import linalg from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_fnet import FNetConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'google/fnet-base' _CONFIG_FOR_DOC = 'FNetConfig' def _two_dim_matmul(x, matrix_dim_one, matrix_dim_two): seq_length = x.shape[1] matrix_dim_one = matrix_dim_one[:seq_length, :seq_length] x = x.type(torch.complex64) return torch.einsum('bij,jk,ni->bnk', x, matrix_dim_two, matrix_dim_one) def two_dim_matmul(x, matrix_dim_one, matrix_dim_two): return _two_dim_matmul(x, matrix_dim_one, matrix_dim_two) def fftn(x): out = x for axis in reversed(range(x.ndim)[1:]): out = torch.fft.fft(out, axis=axis) return out class FNetEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.projection = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) 
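# The absolute position embeddings are added to the word + token-type embeddings below; the result is layer-normalized, passed through the extra `projection` linear layer, and dropped out. Token mixing itself happens later in `FNetBasicFourierTransform`, which, in the default configuration (`use_tpu_fourier_optimizations=False`), applies `torch.fft.fftn` over the sequence and hidden dimensions and keeps the real part, in place of self-attention.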
embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.projection(embeddings) embeddings = self.dropout(embeddings) return embeddings class FNetBasicFourierTransform(nn.Module): def __init__(self, config): super().__init__() self._init_fourier_transform(config) def _init_fourier_transform(self, config): if not config.use_tpu_fourier_optimizations: self.fourier_transform = partial(torch.fft.fftn, dim=(1, 2)) elif config.max_position_embeddings <= 4096: if is_scipy_available(): self.register_buffer('dft_mat_hidden', torch.tensor(linalg.dft(config.hidden_size), dtype=torch.complex64)) self.register_buffer('dft_mat_seq', torch.tensor(linalg.dft(config.tpu_short_seq_length), dtype=torch.complex64)) self.fourier_transform = partial(two_dim_matmul, matrix_dim_one=self.dft_mat_seq, matrix_dim_two=self.dft_mat_hidden) else: logging.warning('SciPy is needed for DFT matrix calculation and is not found. Using TPU optimized fast fourier transform instead.') self.fourier_transform = fftn else: self.fourier_transform = fftn def forward(self, hidden_states): outputs = self.fourier_transform(hidden_states).real return (outputs,) class FNetBasicOutput(nn.Module): def __init__(self, config): super().__init__() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, input_tensor): hidden_states = self.LayerNorm(input_tensor + hidden_states) return hidden_states class FNetFourierTransform(nn.Module): def __init__(self, config): super().__init__() self.self = FNetBasicFourierTransform(config) self.output = FNetBasicOutput(config) def forward(self, hidden_states): self_outputs = self.self(hidden_states) fourier_output = self.output(self_outputs[0], hidden_states) outputs = (fourier_output,) return outputs class FNetIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class FNetOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class FNetLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.fourier = FNetFourierTransform(config) self.intermediate = FNetIntermediate(config) self.output = FNetOutput(config) def forward(self, hidden_states): self_fourier_outputs = self.fourier(hidden_states) fourier_output = self_fourier_outputs[0] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, fourier_output) outputs = (layer_output,) return outputs def feed_forward_chunk(self, fourier_output): intermediate_output = self.intermediate(fourier_output) layer_output = self.output(intermediate_output, 
fourier_output) return layer_output class FNetEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([FNetLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states) else: layer_outputs = layer_module(hidden_states) hidden_states = layer_outputs[0] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states) class FNetPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class FNetPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class FNetLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = FNetPredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states def _tie_weights(self) -> None: if self.decoder.bias.device.type == 'meta': self.decoder.bias = self.bias else: self.bias = self.decoder.bias class FNetOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = FNetLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class FNetOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class FNetPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = FNetLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return (prediction_scores, seq_relationship_score) class FNetPreTrainedModel(PreTrainedModel): config_class = 
FNetConfig base_model_prefix = 'fnet' supports_gradient_checkpointing = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class FNetForPreTrainingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None seq_relationship_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None FNET_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`FNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' FNET_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare FNet Model transformer outputting raw hidden-states without any specific head on top.', FNET_START_DOCSTRING) class FNetModel(FNetPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = FNetEmbeddings(config) self.encoder = FNetEncoder(config) self.pooler = FNetPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = input_ids.size() (batch_size, seq_length) = input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] (batch_size, seq_length) = input_shape else: raise ValueError('You have to specify either input_ids or inputs_embeds') if self.config.use_tpu_fourier_optimizations and seq_length <= 4096 and (self.config.tpu_short_seq_length != seq_length): raise ValueError('The `tpu_short_seq_length` in FNetConfig should be set equal to the sequence length being passed to the model when using TPU optimizations.') device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooler_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooler_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooler_output, hidden_states=encoder_outputs.hidden_states) @add_start_docstrings('\n FNet Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n ', FNET_START_DOCSTRING) class FNetForPreTraining(FNetPreTrainedModel): _tied_weights_keys = 
['cls.predictions.decoder.bias', 'cls.predictions.decoder.weight'] def __init__(self, config): super().__init__(config) self.fnet = FNetModel(config) self.cls = FNetPreTrainingHeads(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=FNetForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, next_sentence_label: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, FNetForPreTrainingOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) (sequence_output, pooled_output) = outputs[:2] (prediction_scores, seq_relationship_score) = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return FNetForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states) @add_start_docstrings('FNet Model with a `language modeling` head on top.', FNET_START_DOCSTRING) class FNetForMaskedLM(FNetPreTrainedModel): _tied_weights_keys = ['cls.predictions.decoder.bias', 'cls.predictions.decoder.weight'] def __init__(self, config): super().__init__(config) self.fnet = FNetModel(config) self.cls = FNetOnlyMLMHead(config) self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = 
self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states) @add_start_docstrings('FNet Model with a `next sentence prediction (classification)` head on top.', FNET_START_DOCSTRING) class FNetForNextSentencePrediction(FNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.fnet = FNetModel(config) self.cls = FNetOnlyNSPHead(config) self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple, NextSentencePredictorOutput]: if 'next_sentence_label' in kwargs: warnings.warn('The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.', FutureWarning) labels = kwargs.pop('next_sentence_label') return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return (next_sentence_loss,) + output if next_sentence_loss is not None else output return NextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states) @add_start_docstrings('\n FNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. 
for GLUE tasks.\n ', FNET_START_DOCSTRING) class FNetForSequenceClassification(FNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.fnet = FNetModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states) @add_start_docstrings('\n FNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', FNET_START_DOCSTRING) class FNetForMultipleChoice(FNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.fnet = FNetModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MultipleChoiceModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states) @add_start_docstrings('\n FNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', FNET_START_DOCSTRING) class FNetForTokenClassification(FNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.fnet = FNetModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states) @add_start_docstrings('\n FNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', FNET_START_DOCSTRING) class FNetForQuestionAnswering(FNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.fnet = FNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = 
end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states) # File: transformers-main/src/transformers/models/fnet/tokenization_fnet.py """""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'} SPIECE_UNDERLINE = '▁' class FNetTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'token_type_ids'] def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None: mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token mask_token = AddedToken(mask_token, special=True) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs) @property def vocab_size(self): return len(self.sp_model) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None return state def __setstate__(self, d): self.__dict__ = d if not hasattr(self, 'sp_model_kwargs'): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def preprocess_text(self, inputs): if self.remove_space: outputs = ' '.join(inputs.strip().split()) else: outputs = inputs outputs = outputs.replace('``', '"').replace("''", '"') if not self.keep_accents: outputs = unicodedata.normalize('NFKD', outputs) outputs = ''.join([c for c in outputs if not unicodedata.combining(c)]) if self.do_lower_case: outputs = outputs.lower() return outputs def _tokenize(self, text: str) -> List[str]: text = self.preprocess_text(text) pieces = self.sp_model.encode(text, out_type=str) new_pieces = [] for piece in pieces: if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit(): cur_pieces = 
self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, '')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: cur_pieces = cur_pieces[1:] else: cur_pieces[0] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(cur_pieces) else: new_pieces.append(piece) return new_pieces def _convert_token_to_id(self, token): return self.sp_model.PieceToId(token) def _convert_id_to_token(self, index): return self.sp_model.IdToPiece(index) def convert_tokens_to_string(self, tokens): current_sub_tokens = [] out_string = '' prev_is_special = False for token in tokens: if token in self.all_special_tokens: if not prev_is_special: out_string += ' ' out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def _decode(self, token_ids: List[int], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=None, spaces_between_special_tokens: bool=False, **kwargs) -> str: text = super()._decode(token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, spaces_between_special_tokens=spaces_between_special_tokens, **kwargs) if not spaces_between_special_tokens: text = text.replace('<unk> ', '<unk>') return text def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) # File: transformers-main/src/transformers/models/fnet/tokenization_fnet_fast.py """""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from 
.tokenization_fnet import FNetTokenizer else: FNetTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} SPIECE_UNDERLINE = '▁' class FNetTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'token_type_ids'] slow_tokenizer_class = FNetTokenizer def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', **kwargs): mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs) self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) # File: transformers-main/src/transformers/models/focalnet/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_focalnet': ['FocalNetConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_focalnet'] = ['FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetBackbone', 'FocalNetModel', 'FocalNetPreTrainedModel'] if TYPE_CHECKING: from .configuration_focalnet import FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # 
File: transformers-main/src/transformers/models/focalnet/configuration_focalnet.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) class FocalNetConfig(BackboneConfigMixin, PretrainedConfig): model_type = 'focalnet' def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act='gelu', mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=0.0001, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-05, encoder_stride=32, out_features=None, out_indices=None, **kwargs): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.use_conv_embed = use_conv_embed self.hidden_sizes = hidden_sizes self.depths = depths self.focal_levels = focal_levels self.focal_windows = focal_windows self.hidden_act = hidden_act self.mlp_ratio = mlp_ratio self.hidden_dropout_prob = hidden_dropout_prob self.drop_path_rate = drop_path_rate self.use_layerscale = use_layerscale self.layerscale_value = layerscale_value self.use_post_layernorm = use_post_layernorm self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation self.normalize_modulator = normalize_modulator self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.encoder_stride = encoder_stride self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)] (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names) # File: transformers-main/src/transformers/models/focalnet/convert_focalnet_to_hf_format.py """""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def get_focalnet_config(model_name): depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2] use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False use_layerscale = True if 'large' in model_name or 'huge' in model_name else False if 'large' in model_name or 'xlarge' in model_name or 'huge' in model_name: if 'fl3' in model_name: focal_levels = [3, 3, 3, 3] focal_windows = [5, 5, 5, 5] elif 'fl4' in model_name: focal_levels = [4, 4, 4, 4] focal_windows = [3, 3, 3, 3] if 'tiny' in model_name or 'small' in model_name or 'base' in model_name: focal_windows = [3, 3, 3, 3] if 'lrf' in model_name: focal_levels = [3, 3, 3, 3] else: focal_levels = [2, 2, 2, 2] if 'tiny' in model_name: embed_dim = 96 elif 'small' in model_name: embed_dim = 96 elif 'base' in model_name: embed_dim = 128 elif 'large' in model_name: embed_dim = 192 elif 'xlarge' in model_name: embed_dim = 256 elif 'huge' in model_name: embed_dim = 352 repo_id = 'huggingface/label-files' if 'large' in model_name or 
'huge' in model_name: filename = 'imagenet-22k-id2label.json' else: filename = 'imagenet-1k-id2label.json' id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} label2id = {v: k for (k, v) in id2label.items()} config = FocalNetConfig(embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale) return config def rename_key(name): if 'patch_embed.proj' in name: name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection') if 'patch_embed.norm' in name: name = name.replace('patch_embed.norm', 'embeddings.norm') if 'layers' in name: name = 'encoder.' + name if 'encoder.layers' in name: name = name.replace('encoder.layers', 'encoder.stages') if 'downsample.proj' in name: name = name.replace('downsample.proj', 'downsample.projection') if 'blocks' in name: name = name.replace('blocks', 'layers') if 'modulation.f.weight' in name or 'modulation.f.bias' in name: name = name.replace('modulation.f', 'modulation.projection_in') if 'modulation.h.weight' in name or 'modulation.h.bias' in name: name = name.replace('modulation.h', 'modulation.projection_context') if 'modulation.proj.weight' in name or 'modulation.proj.bias' in name: name = name.replace('modulation.proj', 'modulation.projection_out') if name == 'norm.weight': name = 'layernorm.weight' if name == 'norm.bias': name = 'layernorm.bias' if 'head' in name: name = name.replace('head', 'classifier') else: name = 'focalnet.' + name return name def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): model_name_to_url = {'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth', 'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth', 'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth', 'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth', 'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth', 'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth', 'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth', 'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth', 'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth', 'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth'} checkpoint_url = model_name_to_url[model_name] print('Checkpoint URL: ', checkpoint_url) state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model'] for key in state_dict.copy().keys(): val = state_dict.pop(key) state_dict[rename_key(key)] = val config = get_focalnet_config(model_name) model = FocalNetForImageClassification(config) model.eval() model.load_state_dict(state_dict) url = 'http://images.cocodataset.org/val2017/000000039769.jpg' processor = 
BitImageProcessor(do_resize=True, size={'shortest_edge': 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD) image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors='pt') image_transforms = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) original_pixel_values = image_transforms(image).unsqueeze(0) assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=0.0001) outputs = model(**inputs) predicted_class_idx = outputs.logits.argmax(-1).item() print('Predicted class:', model.config.id2label[predicted_class_idx]) print('First values of logits:', outputs.logits[0, :3]) if model_name == 'focalnet-tiny': expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]) elif model_name == 'focalnet-tiny-lrf': expected_slice = torch.tensor([1.1669, 0.0125, -0.1695]) elif model_name == 'focalnet-small': expected_slice = torch.tensor([0.4917, -0.043, 0.1341]) elif model_name == 'focalnet-small-lrf': expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331]) elif model_name == 'focalnet-base': expected_slice = torch.tensor([-0.1655, -0.409, -0.173]) elif model_name == 'focalnet-base-lrf': expected_slice = torch.tensor([0.5306, -0.0483, -0.3928]) assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=0.0001) print('Looks ok!') if pytorch_dump_folder_path is not None: print(f'Saving model and processor of {model_name} to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f'Pushing model and processor of {model_name} to the hub...') model.push_to_hub(f'{model_name}') processor.push_to_hub(f'{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='focalnet-tiny', type=str, help="Name of the FocalNet model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub.') args = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/focalnet/modeling_focalnet.py """""" import collections.abc import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_focalnet import FocalNetConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'FocalNetConfig' _CHECKPOINT_FOR_DOC = 'microsoft/focalnet-tiny' _EXPECTED_OUTPUT_SHAPE = [1, 49, 768] _IMAGE_CLASS_CHECKPOINT = 'microsoft/focalnet-tiny' _IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat' @dataclass class FocalNetEncoderOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None 
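# Shape note, derived from FocalNetEncoder.forward further below: in the output dataclasses that follow, `hidden_states` collects one tensor per stage of shape (batch_size, sequence_length, hidden_size), while `reshaped_hidden_states` holds the same tensors permuted to (batch_size, hidden_size, height, width), the layout later consumed by FocalNetBackbone.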
hidden_states: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class FocalNetModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class FocalNetMaskedImageModelingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None reconstruction: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class FocalNetImageClassifierOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None class FocalNetEmbeddings(nn.Module): def __init__(self, config, use_mask_token=False): super().__init__() self.patch_embeddings = FocalNetPatchEmbeddings(config=config, image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.embed_dim, use_conv_embed=config.use_conv_embed, is_stem=True) self.patch_grid = self.patch_embeddings.grid_size self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None self.norm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor]=None) -> Tuple[torch.Tensor]: (embeddings, output_dimensions) = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) (batch_size, seq_len, _) = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask embeddings = self.dropout(embeddings) return (embeddings, output_dimensions) class FocalNetPatchEmbeddings(nn.Module): def __init__(self, config, image_size, patch_size, num_channels, embed_dim, add_norm=False, use_conv_embed=False, is_stem=False): super().__init__() image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) if use_conv_embed: if is_stem: kernel_size = 7 padding = 2 stride = 4 else: kernel_size = 3 padding = 1 stride = 2 self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding) else: self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size) if add_norm: self.norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) else: self.norm = None def maybe_pad(self, pixel_values, height, width): if width % self.patch_size[1] != 0: pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) pixel_values = nn.functional.pad(pixel_values, pad_values) if height % self.patch_size[0] != 0: pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) pixel_values = 
nn.functional.pad(pixel_values, pad_values) return pixel_values def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]: (_, num_channels, height, width) = pixel_values.shape if num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') pixel_values = self.maybe_pad(pixel_values, height, width) embeddings = self.projection(pixel_values) (_, _, height, width) = embeddings.shape output_dimensions = (height, width) embeddings = embeddings.flatten(2).transpose(1, 2) if self.norm is not None: embeddings = self.norm(embeddings) return (embeddings, output_dimensions) def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor: if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() output = input.div(keep_prob) * random_tensor return output class FocalNetDropPath(nn.Module): def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class FocalNetModulation(nn.Module): def __init__(self, config, index, dim, focal_factor=2, bias=True, projection_dropout=0.0): super().__init__() self.dim = dim self.focal_window = config.focal_windows[index] self.focal_level = config.focal_levels[index] self.focal_factor = focal_factor self.use_post_layernorm_in_modulation = config.use_post_layernorm_in_modulation self.normalize_modulator = config.normalize_modulator self.projection_in = nn.Linear(dim, 2 * dim + (self.focal_level + 1), bias=bias) self.projection_context = nn.Conv2d(dim, dim, kernel_size=1, stride=1, bias=bias) self.activation = nn.GELU() self.projection_out = nn.Linear(dim, dim) self.projection_dropout = nn.Dropout(projection_dropout) self.focal_layers = nn.ModuleList() self.kernel_sizes = [] for k in range(self.focal_level): kernel_size = self.focal_factor * k + self.focal_window self.focal_layers.append(nn.Sequential(nn.Conv2d(dim, dim, kernel_size=kernel_size, stride=1, groups=dim, padding=kernel_size // 2, bias=False), nn.GELU())) self.kernel_sizes.append(kernel_size) if self.use_post_layernorm_in_modulation: self.layernorm = nn.LayerNorm(dim, eps=config.layer_norm_eps) def forward(self, hidden_state): num_channels = hidden_state.shape[-1] x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous() (q, ctx, self.gates) = torch.split(x, (num_channels, num_channels, self.focal_level + 1), 1) ctx_all = 0 for level in range(self.focal_level): ctx = self.focal_layers[level](ctx) ctx_all = ctx_all + ctx * self.gates[:, level:level + 1] ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True)) ctx_all = ctx_all + ctx_global * self.gates[:, self.focal_level:] if self.normalize_modulator: ctx_all = ctx_all / (self.focal_level + 1) self.modulator = self.projection_context(ctx_all) x_out = q * self.modulator x_out = x_out.permute(0, 2, 3, 1).contiguous() if self.use_post_layernorm_in_modulation: x_out = self.layernorm(x_out) x_out = self.projection_out(x_out) x_out = self.projection_dropout(x_out) return x_out class FocalNetMlp(nn.Module): def __init__(self, config, in_features, 
hidden_features=None, out_features=None, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.activation = ACT2FN[config.hidden_act] self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, hidden_state): hidden_state = self.fc1(hidden_state) hidden_state = self.activation(hidden_state) hidden_state = self.drop(hidden_state) hidden_state = self.fc2(hidden_state) hidden_state = self.drop(hidden_state) return hidden_state class FocalNetLayer(nn.Module): def __init__(self, config, index, dim, input_resolution, drop_path=0.0): super().__init__() self.config = config self.dim = dim self.input_resolution = input_resolution self.drop = config.hidden_dropout_prob self.use_post_layernorm = config.use_post_layernorm self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.modulation = FocalNetModulation(config=config, index=index, dim=dim, projection_dropout=self.drop) self.drop_path = FocalNetDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps) mlp_hidden_dim = int(dim * config.mlp_ratio) self.mlp = FocalNetMlp(config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=self.drop) self.gamma_1 = 1.0 self.gamma_2 = 1.0 if config.use_layerscale: self.gamma_1 = nn.Parameter(config.layerscale_value * torch.ones(dim), requires_grad=True) self.gamma_2 = nn.Parameter(config.layerscale_value * torch.ones(dim), requires_grad=True) def forward(self, hidden_state, input_dimensions): (height, width) = input_dimensions (batch_size, _, num_channels) = hidden_state.shape shortcut = hidden_state hidden_state = hidden_state if self.use_post_layernorm else self.norm1(hidden_state) hidden_state = hidden_state.view(batch_size, height, width, num_channels) hidden_state = self.modulation(hidden_state).view(batch_size, height * width, num_channels) hidden_state = hidden_state if not self.use_post_layernorm else self.norm1(hidden_state) hidden_state = shortcut + self.drop_path(self.gamma_1 * hidden_state) hidden_state = hidden_state + self.drop_path(self.gamma_2 * (self.norm2(self.mlp(hidden_state)) if self.use_post_layernorm else self.mlp(self.norm2(hidden_state)))) return hidden_state class FocalNetStage(nn.Module): def __init__(self, config, index, input_resolution): super().__init__() self.config = config self.num_stages = len(config.depths) embed_dim = [config.embed_dim * 2 ** i for i in range(self.num_stages)] dim = embed_dim[index] out_dim = embed_dim[index + 1] if index < self.num_stages - 1 else None downsample = FocalNetPatchEmbeddings if index < self.num_stages - 1 else None dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] drop_path = dpr[sum(config.depths[:index]):sum(config.depths[:index + 1])] self.layers = nn.ModuleList([FocalNetLayer(config=config, index=index, dim=dim, input_resolution=input_resolution, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path) for i in range(config.depths[index])]) if downsample is not None: self.downsample = downsample(config=config, image_size=input_resolution, patch_size=2, num_channels=dim, embed_dim=out_dim, add_norm=True, use_conv_embed=config.use_conv_embed, is_stem=False) else: self.downsample = None self.pointing = False def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int]) -> Tuple[torch.Tensor]: (height, width) = input_dimensions for 
layer_module in self.layers: hidden_states = layer_module(hidden_states, input_dimensions) hidden_states_before_downsampling = hidden_states if self.downsample is not None: (height, width) = input_dimensions hidden_states = hidden_states.transpose(1, 2).reshape(hidden_states_before_downsampling.shape[0], -1, height, width) (hidden_states, output_dimensions) = self.downsample(hidden_states) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) return stage_outputs class FocalNetEncoder(nn.Module): def __init__(self, config, grid_size): super().__init__() self.num_stages = len(config.depths) self.config = config self.stages = nn.ModuleList([FocalNetStage(config=config, index=i_layer, input_resolution=(grid_size[0] // 2 ** i_layer, grid_size[1] // 2 ** i_layer)) for i_layer in range(self.num_stages)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple, FocalNetEncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None if output_hidden_states: (batch_size, _, hidden_size) = hidden_states.shape reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for (i, stage_module) in enumerate(self.stages): if self.gradient_checkpointing and self.training: stage_outputs = self._gradient_checkpointing_func(stage_module.__call__, hidden_states, input_dimensions) else: stage_outputs = stage_module(hidden_states, input_dimensions) hidden_states = stage_outputs[0] hidden_states_before_downsampling = stage_outputs[1] output_dimensions = stage_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: (batch_size, _, hidden_size) = hidden_states_before_downsampling.shape reshaped_hidden_state = hidden_states_before_downsampling.view(batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and (not output_hidden_states_before_downsampling): (batch_size, _, hidden_size) = hidden_states.shape reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states] if v is not None)) return FocalNetEncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, reshaped_hidden_states=all_reshaped_hidden_states) class FocalNetPreTrainedModel(PreTrainedModel): config_class = FocalNetConfig base_model_prefix = 'focalnet' main_input_name = 'pixel_values' supports_gradient_checkpointing = True _no_split_modules = ['FocalNetStage'] def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if 
module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) FOCALNET_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`FocalNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' FOCALNET_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`AutoImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare FocalNet Model outputting raw hidden-states without any specific head on top.', FOCALNET_START_DOCSTRING) class FocalNetModel(FocalNetPreTrainedModel): def __init__(self, config, add_pooling_layer=True, use_mask_token=False): super().__init__(config) self.config = config self.num_stages = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_stages - 1)) self.embeddings = FocalNetEmbeddings(config, use_mask_token=use_mask_token) self.encoder = FocalNetEncoder(config, self.embeddings.patch_grid) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=FocalNetModelOutput, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, FocalNetModelOutput]: output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') (embedding_output, input_dimensions) = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) encoder_outputs = self.encoder(embedding_output, input_dimensions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return FocalNetModelOutput(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, 
reshaped_hidden_states=encoder_outputs.reshaped_hidden_states) @add_start_docstrings('FocalNet Model with a decoder on top for masked image modeling.\n\n This follows the same implementation as in [SimMIM](https://arxiv.org/abs/2111.09886).\n\n \n\n Note that we provide a script to pre-train this model on custom data in our [examples\n directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).\n\n \n ', FOCALNET_START_DOCSTRING) class FocalNetForMaskedImageModeling(FocalNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.focalnet = FocalNetModel(config, add_pooling_layer=False, use_mask_token=True) self.num_stages = len(config.depths) num_features = int(config.embed_dim * 2 ** (self.num_stages - 1)) self.decoder = nn.Sequential(nn.Conv2d(in_channels=num_features, out_channels=config.encoder_stride ** 2 * config.num_channels, kernel_size=1), nn.PixelShuffle(config.encoder_stride)) self.post_init() @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FocalNetMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, FocalNetMaskedImageModelingOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.focalnet(pixel_values, bool_masked_pos=bool_masked_pos, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = sequence_output.transpose(1, 2) (batch_size, num_channels, sequence_length) = sequence_output.shape height = width = math.floor(sequence_length ** 0.5) sequence_output = sequence_output.reshape(batch_size, num_channels, height, width) reconstructed_pixel_values = self.decoder(sequence_output) masked_im_loss = None if bool_masked_pos is not None: size = self.config.image_size // self.config.patch_size bool_masked_pos = bool_masked_pos.reshape(-1, size, size) mask = bool_masked_pos.repeat_interleave(self.config.patch_size, 1).repeat_interleave(self.config.patch_size, 2).unsqueeze(1).contiguous() reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction='none') masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-05) / self.config.num_channels if not return_dict: output = (reconstructed_pixel_values,) + outputs[2:] return (masked_im_loss,) + output if masked_im_loss is not None else output return FocalNetMaskedImageModelingOutput(loss=masked_im_loss, reconstruction=reconstructed_pixel_values, hidden_states=outputs.hidden_states, reshaped_hidden_states=outputs.reshaped_hidden_states) @add_start_docstrings('\n FocalNet Model with an image classification head on top (a linear layer on top of the pooled output) e.g. 
for\n ImageNet.\n ', FOCALNET_START_DOCSTRING) class FocalNetForImageClassification(FocalNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.focalnet = FocalNetModel(config) self.classifier = nn.Linear(self.focalnet.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=FocalNetImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, FocalNetImageClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.focalnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return FocalNetImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, reshaped_hidden_states=outputs.reshaped_hidden_states) @add_start_docstrings('\n FocalNet backbone, to be used with frameworks like X-Decoder.\n ', FOCALNET_START_DOCSTRING) class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin): def __init__(self, config: FocalNetConfig): super().__init__(config) super()._init_backbone(config) self.num_features = [config.embed_dim] + config.hidden_sizes self.focalnet = FocalNetModel(config) self.post_init() @add_start_docstrings_to_model_forward(FOCALNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states outputs = self.focalnet(pixel_values, output_hidden_states=True, return_dict=True) hidden_states = outputs.reshaped_hidden_states feature_maps = () for (idx, stage) in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states 
if output_hidden_states else None, attentions=None) # File: transformers-main/src/transformers/models/fsmt/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_fsmt': ['FSMTConfig'], 'tokenization_fsmt': ['FSMTTokenizer']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_fsmt'] = ['FSMTForConditionalGeneration', 'FSMTModel', 'PretrainedFSMTModel'] if TYPE_CHECKING: from .configuration_fsmt import FSMTConfig from .tokenization_fsmt import FSMTTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/fsmt/configuration_fsmt.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class DecoderConfig(PretrainedConfig): model_type = 'fsmt_decoder' def __init__(self, vocab_size=0, bos_token_id=0): super().__init__() self.vocab_size = vocab_size self.bos_token_id = bos_token_id class FSMTConfig(PretrainedConfig): model_type = 'fsmt' attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__(self, langs=['en', 'de'], src_vocab_size=42024, tgt_vocab_size=42024, activation_function='relu', d_model=1024, max_length=200, max_position_embeddings=1024, encoder_ffn_dim=4096, encoder_layers=12, encoder_attention_heads=16, encoder_layerdrop=0.0, decoder_ffn_dim=4096, decoder_layers=12, decoder_attention_heads=16, decoder_layerdrop=0.0, attention_dropout=0.0, dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, is_encoder_decoder=True, scale_embedding=True, tie_word_embeddings=False, num_beams=5, length_penalty=1.0, early_stopping=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **common_kwargs): self.langs = langs self.src_vocab_size = src_vocab_size self.tgt_vocab_size = tgt_vocab_size self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = self.num_hidden_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std self.activation_function = activation_function self.decoder = DecoderConfig(vocab_size=tgt_vocab_size, bos_token_id=eos_token_id) if 'decoder' in common_kwargs: del common_kwargs['decoder'] self.scale_embedding = scale_embedding self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout self.use_cache = use_cache super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, is_encoder_decoder=is_encoder_decoder, tie_word_embeddings=tie_word_embeddings, forced_eos_token_id=forced_eos_token_id, max_length=max_length, num_beams=num_beams, length_penalty=length_penalty, 
early_stopping=early_stopping, **common_kwargs) # File: transformers-main/src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() json_indent = 2 best_score_hparams = {'wmt19-ru-en': {'length_penalty': 1.1}, 'wmt19-en-ru': {'length_penalty': 1.15}, 'wmt19-en-de': {'length_penalty': 1.0}, 'wmt19-de-en': {'length_penalty': 1.1}, 'wmt16-en-de-dist-12-1': {'length_penalty': 0.6}, 'wmt16-en-de-dist-6-1': {'length_penalty': 0.6}, 'wmt16-en-de-12-1': {'length_penalty': 0.8}, 'wmt19-de-en-6-6-base': {'length_penalty': 0.6}, 'wmt19-de-en-6-6-big': {'length_penalty': 0.6}} org_names = {} for m in ['wmt19-ru-en', 'wmt19-en-ru', 'wmt19-en-de', 'wmt19-de-en']: org_names[m] = 'facebook' for m in ['wmt16-en-de-dist-12-1', 'wmt16-en-de-dist-6-1', 'wmt16-en-de-12-1', 'wmt19-de-en-6-6-base', 'wmt19-de-en-6-6-big']: org_names[m] = 'allenai' def rewrite_dict_keys(d): d2 = dict(((re.sub('@@$', '', k), v) if k.endswith('@@') else (re.sub('$', '</w>', k), v) for (k, v) in d.items())) keep_keys = '<s> <pad> </s> <unk>'.split() for k in keep_keys: del d2[f'{k}</w>'] d2[k] = d[k] return d2 def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path): assert os.path.exists(fsmt_checkpoint_path) os.makedirs(pytorch_dump_folder_path, exist_ok=True) print(f'Writing results to {pytorch_dump_folder_path}') checkpoint_file = basename(fsmt_checkpoint_path) fsmt_folder_path = dirname(fsmt_checkpoint_path) cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel models = cls.hub_models() kwargs = {'bpe': 'fastbpe', 'tokenizer': 'moses'} data_name_or_path = '.' 
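# The hub_utils.from_pretrained call below loads the fairseq ensemble together with its saved args; the rest of this function rewrites the source/target dictionaries into vocab-src.json / vocab-tgt.json, exports the BPE merges file, writes config.json and the tokenizer config, and finally remaps the fairseq state_dict onto FSMTForConditionalGeneration. Example invocation (hypothetical paths): python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py --fsmt_checkpoint_path /path/to/wmt19-ru-en/model4.pt --pytorch_dump_folder_path data/wmt19-ru-en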
print(f'using checkpoint {checkpoint_file}') chkpt = hub_utils.from_pretrained(fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs) args = vars(chkpt['args']['model']) src_lang = args['source_lang'] tgt_lang = args['target_lang'] data_root = dirname(pytorch_dump_folder_path) model_dir = basename(pytorch_dump_folder_path) src_dict_file = os.path.join(fsmt_folder_path, f'dict.{src_lang}.txt') tgt_dict_file = os.path.join(fsmt_folder_path, f'dict.{tgt_lang}.txt') src_dict = Dictionary.load(src_dict_file) src_vocab = rewrite_dict_keys(src_dict.indices) src_vocab_size = len(src_vocab) src_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-src.json') print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records') with open(src_vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent)) do_lower_case = True for k in src_vocab.keys(): if not k.islower(): do_lower_case = False break tgt_dict = Dictionary.load(tgt_dict_file) tgt_vocab = rewrite_dict_keys(tgt_dict.indices) tgt_vocab_size = len(tgt_vocab) tgt_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-tgt.json') print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records') with open(tgt_vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent)) merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file']) for fn in ['bpecodes', 'code']: fsmt_merges_file = os.path.join(fsmt_folder_path, fn) if os.path.exists(fsmt_merges_file): break with open(fsmt_merges_file, encoding='utf-8') as fin: merges = fin.read() merges = re.sub(' \\d+$', '', merges, 0, re.M) print(f'Generating {merges_file}') with open(merges_file, 'w', encoding='utf-8') as fout: fout.write(merges) fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json') assert args['bpe'] == 'fastbpe', f"need to extend tokenizer to support bpe={args['bpe']}" assert args['tokenizer'] == 'moses', f"need to extend tokenizer to support bpe={args['tokenizer']}" model_conf = {'architectures': ['FSMTForConditionalGeneration'], 'model_type': 'fsmt', 'activation_dropout': args['activation_dropout'], 'activation_function': 'relu', 'attention_dropout': args['attention_dropout'], 'd_model': args['decoder_embed_dim'], 'dropout': args['dropout'], 'init_std': 0.02, 'max_position_embeddings': args['max_source_positions'], 'num_hidden_layers': args['encoder_layers'], 'src_vocab_size': src_vocab_size, 'tgt_vocab_size': tgt_vocab_size, 'langs': [src_lang, tgt_lang], 'encoder_attention_heads': args['encoder_attention_heads'], 'encoder_ffn_dim': args['encoder_ffn_embed_dim'], 'encoder_layerdrop': args['encoder_layerdrop'], 'encoder_layers': args['encoder_layers'], 'decoder_attention_heads': args['decoder_attention_heads'], 'decoder_ffn_dim': args['decoder_ffn_embed_dim'], 'decoder_layerdrop': args['decoder_layerdrop'], 'decoder_layers': args['decoder_layers'], 'bos_token_id': 0, 'pad_token_id': 1, 'eos_token_id': 2, 'is_encoder_decoder': True, 'scale_embedding': not args['no_scale_embedding'], 'tie_word_embeddings': args['share_all_embeddings']} model_conf['num_beams'] = 5 model_conf['early_stopping'] = False if model_dir in best_score_hparams and 'length_penalty' in best_score_hparams[model_dir]: model_conf['length_penalty'] = best_score_hparams[model_dir]['length_penalty'] else: model_conf['length_penalty'] = 1.0 print(f'Generating {fsmt_model_config_file}') with 
open(fsmt_model_config_file, 'w', encoding='utf-8') as f: f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent)) fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE) tokenizer_conf = {'langs': [src_lang, tgt_lang], 'model_max_length': 1024, 'do_lower_case': do_lower_case} print(f'Generating {fsmt_tokenizer_config_file}') with open(fsmt_tokenizer_config_file, 'w', encoding='utf-8') as f: f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent)) model = chkpt['models'][0] model_state_dict = model.state_dict() model_state_dict = OrderedDict((('model.' + k, v) for (k, v) in model_state_dict.items())) ignore_keys = ['model.model', 'model.encoder.version', 'model.decoder.version', 'model.encoder_embed_tokens.weight', 'model.decoder_embed_tokens.weight', 'model.encoder.embed_positions._float_tensor', 'model.decoder.embed_positions._float_tensor'] for k in ignore_keys: model_state_dict.pop(k, None) config = FSMTConfig.from_pretrained(pytorch_dump_folder_path) model_new = FSMTForConditionalGeneration(config) model_new.load_state_dict(model_state_dict, strict=False) pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME) print(f'Generating {pytorch_weights_dump_path}') torch.save(model_state_dict, pytorch_weights_dump_path) print('Conversion is done!') print('\nLast step is to upload the files to s3') print(f'cd {data_root}') print(f'transformers-cli upload {model_dir}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--fsmt_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts, bpecodes, etc.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') args = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path) # File: transformers-main/src/transformers/models/fsmt/modeling_fsmt.py """""" import math from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import Tensor, nn from torch.nn import CrossEntropyLoss, LayerNorm from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_fsmt import FSMTConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'facebook/wmt19-ru-en' _CONFIG_FOR_DOC = 'FSMTConfig' '' FSMT_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`FSMTConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n' FSMT_GENERATION_EXAMPLE = '\n Translation example::\n\n ```python\n >>> from transformers import AutoTokenizer, FSMTForConditionalGeneration\n\n >>> mname = "facebook/wmt19-ru-en"\n >>> model = FSMTForConditionalGeneration.from_pretrained(mname)\n >>> tokenizer = AutoTokenizer.from_pretrained(mname)\n\n >>> src_text = "Машинное обучение - это здорово, не так ли?"\n >>> input_ids = tokenizer(src_text, return_tensors="pt").input_ids\n >>> outputs = model.generate(input_ids, num_beams=5, num_return_sequences=3)\n >>> tokenizer.decode(outputs[0], skip_special_tokens=True)\n "Machine learning is great, isn\'t it?"\n ```\n\n' FSMT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`FSTMTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n FSMT uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`\n is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).\n decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,\n 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`Tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states at\n the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n past_key_values (`Tuple(torch.FloatTensor)` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded\n representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be\n input (see `past_key_values`). This is useful if you want more control over how to convert\n `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value\n of `inputs_embeds`.\n use_cache (`bool`, *optional*, defaults to `True`):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" def invert_mask(attention_mask): assert attention_mask.dim() == 2 return attention_mask.eq(0) def triu_onnx(x, diagonal=0): l = x.shape[0] arange = torch.arange(l, device=x.device) mask = arange.expand(l, l) arange = arange.unsqueeze(-1) if diagonal: arange = arange + diagonal mask = mask >= arange return x.masked_fill(mask == 0, 0) def _prepare_fsmt_decoder_inputs(config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32): pad_token_id = config.pad_token_id if decoder_input_ids is None: decoder_input_ids = shift_tokens_right(input_ids, pad_token_id) (bsz, tgt_len) = decoder_input_ids.size() if decoder_padding_mask is None: decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id) else: decoder_padding_mask = invert_mask(decoder_padding_mask) causal_mask = triu_onnx(fill_with_neg_inf(torch.zeros(tgt_len, tgt_len, dtype=causal_mask_dtype)), 1).to(device=decoder_input_ids.device) return (decoder_input_ids, decoder_padding_mask, causal_mask) class PretrainedFSMTModel(PreTrainedModel): config_class = FSMTConfig base_model_prefix = 'model' def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, SinusoidalPositionalEmbedding): pass elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = {'attention_mask': input_ids.ne(pad_token), 'input_ids': input_ids} return dummy_inputs def _make_linear_from_emb(emb): (vocab_size, emb_size) = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer def _check_shapes(shape_1, shape2): if shape_1 != shape2: raise AssertionError(f'shape mismatch: {shape_1} != {shape2}') def shift_tokens_right(input_ids, pad_token_id): input_ids.masked_fill_(input_ids == -100, pad_token_id) prev_output_tokens = input_ids.clone() index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1) prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze() prev_output_tokens[:, 1:] = input_ids[:, :-1] return prev_output_tokens def make_padding_mask(input_ids, padding_idx=1): padding_mask = input_ids.eq(padding_idx) if not padding_mask.any(): padding_mask = None return padding_mask class EncoderLayer(nn.Module): def __init__(self, config: FSMTConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def forward(self, x, encoder_padding_mask, layer_head_mask, output_attentions=False): residual = x (x, attn_weights) = self.self_attn(query=x, key=x, 
key_padding_mask=encoder_padding_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.self_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.final_layer_norm(x) return (x, attn_weights) class FSMTEncoder(nn.Module): def __init__(self, config: FSMTConfig, embed_tokens): super().__init__() self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.padding_idx = embed_tokens.padding_idx self.embed_tokens = embed_tokens embed_dim = embed_tokens.embedding_dim self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_positions = SinusoidalPositionalEmbedding(config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx) self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)]) def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: torch.Tensor=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): if attention_mask is not None: attention_mask = invert_mask(attention_mask) if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_ids) elif inputs_embeds is not None: inputs_embeds = inputs_embeds * self.embed_scale position_ids = inputs_embeds[:, :, 0].masked_fill(inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx) embed_pos = self.embed_positions(position_ids) else: raise ValueError('You have to specify either input_ids or inputs_embeds') x = inputs_embeds + embed_pos x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = x.transpose(0, 1) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: assert head_mask.size()[0] == len(self.layers), f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.' 
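# The encoder works in time-major layout: `x` was transposed to [seq_len, batch, hidden] above and
# is only transposed back when hidden states are collected or returned. During training, LayerDrop
# skips an entire layer with probability `self.layerdrop` (that layer's attention weights are then
# reported as None).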
for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: x = x.transpose(0, 1) encoder_states += (x,) x = x.transpose(0, 1) dropout_probability = torch.rand([]) if self.training and dropout_probability < self.layerdrop: attn = None else: (x, attn) = encoder_layer(x, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) if output_attentions: all_attentions = all_attentions + (attn,) x = x.transpose(0, 1) if output_hidden_states: encoder_states += (x,) if not return_dict: return tuple((v for v in [x, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions) class DecoderLayer(nn.Module): def __init__(self, config: FSMTConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Attention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.encoder_attn = Attention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, encoder_decoder_attention=True) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def forward(self, x, encoder_hidden_states, encoder_attn_mask=None, layer_state=None, causal_mask=None, layer_head_mask=None, cross_attn_layer_head_mask=None, decoder_padding_mask=None, output_attentions=False): residual = x if layer_state is None: layer_state = {} (x, self_attn_weights) = self.self_attn(query=x, key=x, layer_state=layer_state, key_padding_mask=decoder_padding_mask, attn_mask=causal_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.self_attn_layer_norm(x) residual = x assert self.encoder_attn.cache_key != self.self_attn.cache_key (x, cross_attn_weights) = self.encoder_attn(query=x, key=encoder_hidden_states, key_padding_mask=encoder_attn_mask, layer_state=layer_state, layer_head_mask=cross_attn_layer_head_mask, output_attentions=output_attentions) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.encoder_attn_layer_norm(x) residual = x x = self.activation_fn(self.fc1(x)) x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.final_layer_norm(x) return (x, self_attn_weights, layer_state, cross_attn_weights) class FSMTDecoder(nn.Module): def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding): super().__init__() self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = embed_tokens.padding_idx self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = embed_tokens embed_dim = embed_tokens.embedding_dim self.embed_positions = SinusoidalPositionalEmbedding(config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx) self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.decoder_layers)]) if is_deepspeed_zero3_enabled(): import 
deepspeed with deepspeed.zero.GatheredParameters(self.embed_tokens.weight, modifier_rank=None): embed_tokens_weight_shape = self.embed_tokens.weight.shape else: embed_tokens_weight_shape = self.embed_tokens.weight.shape self.output_projection = nn.Linear(embed_tokens_weight_shape[1], embed_tokens_weight_shape[0], bias=False) self.output_projection.weight = self.embed_tokens.weight def _tie_weights(self): self.embed_tokens.weight = self.output_projection.weight def forward(self, input_ids: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_padding_mask: torch.Tensor, decoder_padding_mask: torch.Tensor, decoder_causal_mask: torch.Tensor, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, use_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): if encoder_padding_mask is not None: encoder_padding_mask = invert_mask(encoder_padding_mask) if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: positions = self.embed_positions(input_ids) if use_cache: input_ids = input_ids[:, -1:] positions = positions[:, -1:] x = self.embed_tokens(input_ids) * self.embed_scale elif inputs_embeds is not None: position_ids = inputs_embeds[:, :, 0].masked_fill(inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx) positions = self.embed_positions(position_ids) x = inputs_embeds * self.embed_scale else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') x += positions x = nn.functional.dropout(x, p=self.dropout, training=self.training) x = x.transpose(0, 1) encoder_hidden_states = encoder_hidden_states.transpose(0, 1) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attns = () if output_attentions else None next_decoder_cache = [] for (attn_mask, mask_name) in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: assert attn_mask.size()[0] == len(self.layers), f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.' 
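# Each decoder layer below is passed `layer_state`, a per-layer dict caching self-attention and
# cross-attention keys/values; when `use_cache` is True the updated dicts are collected into
# `next_decoder_cache` so that incremental generation only needs to feed in the last produced token.
# As in the encoder, LayerDrop can skip whole layers during training.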
for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: x = x.transpose(0, 1) all_hidden_states += (x,) x = x.transpose(0, 1) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_state = past_key_values[idx] if past_key_values is not None else None (x, layer_self_attn, layer_past, layer_cross_attn) = decoder_layer(x, encoder_hidden_states, encoder_attn_mask=encoder_padding_mask, decoder_padding_mask=decoder_padding_mask, layer_state=layer_state, causal_mask=decoder_causal_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, output_attentions=output_attentions) if use_cache: next_decoder_cache.append(layer_past.copy()) if output_attentions: all_self_attns += (layer_self_attn,) all_cross_attns += (layer_cross_attn,) if output_hidden_states: x = x.transpose(0, 1) all_hidden_states += (x,) x = x.transpose(0, 1) x = x.transpose(0, 1) encoder_hidden_states = encoder_hidden_states.transpose(0, 1) x = self.output_projection(x) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [x, next_cache, all_hidden_states, all_self_attns, all_cross_attns] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=x, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attns) def _reorder_buffer(attn_cache, new_order): for (k, input_buffer_k) in attn_cache.items(): if input_buffer_k is not None: attn_cache[k] = input_buffer_k.index_select(0, new_order) return attn_cache class Attention(nn.Module): def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, encoder_decoder_attention=False): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** (-0.5) self.encoder_decoder_attention = encoder_decoder_attention self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.cache_key = 'encoder_decoder' if self.encoder_decoder_attention else 'self' def _shape(self, tensor, seq_len, bsz): return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1) def forward(self, query, key: Optional[Tensor], key_padding_mask: Optional[Tensor]=None, layer_state: Optional[Dict[str, Optional[Tensor]]]=None, attn_mask: Optional[Tensor]=None, layer_head_mask: Optional[Tensor]=None, output_attentions=False) -> Tuple[Tensor, Optional[Tensor]]: static_kv: bool = self.encoder_decoder_attention (tgt_len, bsz, embed_dim) = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] if layer_state is not None: saved_state = layer_state.get(self.cache_key, {}) if 'prev_key' in saved_state and static_kv: key = None else: saved_state = None layer_state = {} q = self.q_proj(query) * self.scaling if static_kv: if key is None: k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: k = self.k_proj(query) v = self.v_proj(query) q = self._shape(q, tgt_len, bsz) if k is not None: k = self._shape(k, -1, bsz) if v is not None: v = self._shape(v, -1, bsz) if 
saved_state is not None: (k, v, key_padding_mask) = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz) layer_state[self.cache_key] = {'prev_key': k.view(bsz, self.num_heads, -1, self.head_dim), 'prev_value': v.view(bsz, self.num_heads, -1, self.head_dim), 'prev_key_padding_mask': key_padding_mask if not static_kv else None} assert k is not None src_len = k.size(1) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len) if attn_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None assert key_padding_mask is None or key_padding_mask.size()[:2] == (bsz, src_len) if key_padding_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2) attn_weights = attn_weights.masked_fill(reshaped, torch.finfo(attn_weights.dtype).min) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: assert layer_head_mask.size() == (self.num_heads,), f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}' attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) assert v is not None attn_output = torch.bmm(attn_probs, v) assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped) def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz): if 'prev_key' in saved_state: _prev_key = saved_state['prev_key'] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) if 'prev_value' in saved_state: _prev_value = saved_state['prev_value'] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) assert k is not None and v is not None prev_key_padding_mask: Optional[Tensor] = saved_state.get('prev_key_padding_mask', None) if prev_key_padding_mask is not None: if static_kv: new_key_padding_mask = prev_key_padding_mask else: new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1) else: new_key_padding_mask = key_padding_mask return (k, v, new_key_padding_mask) def fill_with_neg_inf(t): return t.float().fill_(torch.finfo(t.dtype).min).type_as(t) def _get_shape(t): return getattr(t, 'shape', None) @add_start_docstrings('The bare FSMT Model outputting raw hidden-states without any specific head on top.', FSMT_START_DOCSTRING) class FSMTModel(PretrainedFSMTModel): _tied_weights_keys = 
['decoder.embed_tokens.weight', 'decoder.output_projection.weight'] def __init__(self, config: FSMTConfig): super().__init__(config) padding_idx = config.pad_token_id encoder_embed_tokens = nn.Embedding(config.src_vocab_size, config.d_model, padding_idx) decoder_embed_tokens = nn.Embedding(config.tgt_vocab_size, config.d_model, padding_idx) self.encoder = FSMTEncoder(config, encoder_embed_tokens) self.decoder = FSMTDecoder(config, decoder_embed_tokens) self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.decoder.embed_tokens, self.get_input_embeddings()) self._tie_or_clone_weights(self.decoder.output_projection, self.get_input_embeddings()) @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Tuple[torch.FloatTensor]]=None, past_key_values: Optional[Tuple[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]: if decoder_input_ids is None: use_cache = False output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if not use_cache and input_ids is not None: (decoder_input_ids, decoder_padding_mask, causal_mask) = _prepare_fsmt_decoder_inputs(self.config, input_ids, decoder_input_ids=decoder_input_ids, decoder_padding_mask=decoder_attention_mask, causal_mask_dtype=self.decoder.embed_tokens.weight.dtype) else: (decoder_padding_mask, causal_mask) = (None, None) if decoder_input_ids is None and decoder_inputs_embeds is None: raise ValueError('Make sure that `decoder_input_ids` or `decoder_inputs_embeds` are passed.') if encoder_outputs is None: encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) decoder_outputs = self.decoder(decoder_input_ids, encoder_outputs[0], attention_mask, decoder_padding_mask, decoder_causal_mask=causal_mask, inputs_embeds=decoder_inputs_embeds, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, 
output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def get_input_embeddings(self): return self.encoder.embed_tokens def set_input_embeddings(self, value): self.encoder.embed_tokens = value def get_output_embeddings(self): return self.decoder.embed_tokens def set_output_embeddings(self, value): self.decoder.embed_tokens = value @add_start_docstrings('The FSMT Model with a language modeling head. Can be used for summarization.', FSMT_START_DOCSTRING) class FSMTForConditionalGeneration(PretrainedFSMTModel): base_model_prefix = 'model' _tied_weights_keys = ['decoder.embed_tokens.weight', 'decoder.output_projection.weight'] def __init__(self, config: FSMTConfig): super().__init__(config) base_model = FSMTModel(config) self.model = base_model self.post_init() @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(FSMT_GENERATION_EXAMPLE) def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Tuple[torch.FloatTensor]]=None, past_key_values: Optional[Tuple[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.model(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) lm_logits = outputs[0] masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.tgt_vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return Seq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, 
encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs): return {'input_ids': None, 'encoder_outputs': encoder_outputs, 'past_key_values': past_key_values, 'decoder_input_ids': decoder_input_ids, 'attention_mask': attention_mask, 'head_mask': head_mask, 'decoder_head_mask': decoder_head_mask, 'cross_attn_head_mask': cross_attn_head_mask, 'use_cache': use_cache} def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = [] for layer_past in past_key_values: layer_past_new = {attn_key: _reorder_buffer(attn_cache, beam_idx) for (attn_key, attn_cache) in layer_past.items()} reordered_past.append(layer_past_new) return reordered_past def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder def get_output_embeddings(self): return self.model.decoder.embed_tokens def set_output_embeddings(self, value): self.model.decoder.embed_tokens = value class SinusoidalPositionalEmbedding(nn.Embedding): def __init__(self, num_positions, embedding_dim, padding_idx): self.make_weight(num_positions, embedding_dim, padding_idx) def make_weight(self, num_positions, embedding_dim, padding_idx): weight = self.get_embedding(num_positions, embedding_dim, padding_idx) if not hasattr(self, 'weight'): super().__init__(num_positions, embedding_dim, padding_idx, _weight=weight) else: weight = weight.to(dtype=self.weight.dtype, device=self.weight.device) self.weight = nn.Parameter(weight) self.weight.detach_() self.weight.requires_grad = False @staticmethod def get_embedding(num_embeddings, embedding_dim, padding_idx): half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb @staticmethod def make_positions(tensor, padding_idx: int): mask = tensor.ne(padding_idx).int() return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx def forward(self, input, incremental_state: Optional[Any]=None, timestep: Optional[Tensor]=None): (bsz, seq_len) = input.shape[:2] max_pos = self.padding_idx + 1 + seq_len if max_pos > self.weight.size(0): self.make_weight(max_pos, self.embedding_dim, self.padding_idx) positions = self.make_positions(input, self.padding_idx) return super().forward(positions) # File: transformers-main/src/transformers/models/fsmt/tokenization_fsmt.py """""" import json import os import re import unicodedata from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'src_vocab_file': 'vocab-src.json', 'tgt_vocab_file': 'vocab-tgt.json', 'merges_file': 'merges.txt'} def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, 
char)) prev_char = char return pairs def replace_unicode_punct(text): text = text.replace('，', ',') text = re.sub('。\\s*', '. ', text) text = text.replace('、', ',') text = text.replace('”', '"') text = text.replace('“', '"') text = text.replace('∶', ':') text = text.replace('：', ':') text = text.replace('？', '?') text = text.replace('《', '"') text = text.replace('》', '"') text = text.replace('）', ')') text = text.replace('！', '!') text = text.replace('（', '(') text = text.replace('；', ';') text = text.replace('１', '1') text = text.replace('」', '"') text = text.replace('「', '"') text = text.replace('０', '0') text = text.replace('３', '3') text = text.replace('２', '2') text = text.replace('５', '5') text = text.replace('６', '6') text = text.replace('９', '9') text = text.replace('７', '7') text = text.replace('８', '8') text = text.replace('４', '4') text = re.sub('．\\s*', '. ', text) text = text.replace('～', '~') text = text.replace('’', "'") text = text.replace('…', '...') text = text.replace('━', '-') text = text.replace('〈', '<') text = text.replace('〉', '>') text = text.replace('【', '[') text = text.replace('】', ']') text = text.replace('％', '%') return text def remove_non_printing_char(text): output = [] for char in text: cat = unicodedata.category(char) if cat.startswith('C'): continue output.append(char) return ''.join(output) class FSMTTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, langs=None, src_vocab_file=None, tgt_vocab_file=None, merges_file=None, do_lower_case=False, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', **kwargs): try: import sacremoses except ImportError: raise ImportError('You need to install sacremoses to use FSMTTokenizer. See https://pypi.org/project/sacremoses/ for installation.') self.sm = sacremoses self.src_vocab_file = src_vocab_file self.tgt_vocab_file = tgt_vocab_file self.merges_file = merges_file self.do_lower_case = do_lower_case self.cache_moses_punct_normalizer = {} self.cache_moses_tokenizer = {} self.cache_moses_detokenizer = {} if langs and len(langs) == 2: (self.src_lang, self.tgt_lang) = langs else: raise ValueError(f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. 
Usually that means that the tokenizer can't find a mapping for the given model path in the predefined maps of this tokenizer.") with open(src_vocab_file, encoding='utf-8') as src_vocab_handle: self.encoder = json.load(src_vocab_handle) with open(tgt_vocab_file, encoding='utf-8') as tgt_vocab_handle: tgt_vocab = json.load(tgt_vocab_handle) self.decoder = {v: k for (k, v) in tgt_vocab.items()} with open(merges_file, encoding='utf-8') as merges_handle: merges = merges_handle.read().split('\n')[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__(langs=langs, src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, do_lower_case=do_lower_case, unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, **kwargs) def get_vocab(self) -> Dict[str, int]: return self.get_src_vocab() @property def vocab_size(self) -> int: return self.src_vocab_size def moses_punct_norm(self, text, lang): if lang not in self.cache_moses_punct_normalizer: punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang) self.cache_moses_punct_normalizer[lang] = punct_normalizer return self.cache_moses_punct_normalizer[lang].normalize(text) def moses_tokenize(self, text, lang): if lang not in self.cache_moses_tokenizer: moses_tokenizer = self.sm.MosesTokenizer(lang=lang) self.cache_moses_tokenizer[lang] = moses_tokenizer return self.cache_moses_tokenizer[lang].tokenize(text, aggressive_dash_splits=True, return_str=False, escape=True) def moses_detokenize(self, tokens, lang): if lang not in self.cache_moses_detokenizer: moses_detokenizer = self.sm.MosesDetokenizer(lang=lang) self.cache_moses_detokenizer[lang] = moses_detokenizer return self.cache_moses_detokenizer[lang].detokenize(tokens) def moses_pipeline(self, text, lang): text = replace_unicode_punct(text) text = self.moses_punct_norm(text, lang) text = remove_non_printing_char(text) return text @property def src_vocab_size(self): return len(self.encoder) @property def tgt_vocab_size(self): return len(self.decoder) def get_src_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def get_tgt_vocab(self): return dict(self.decoder, **self.added_tokens_decoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + '</w>',) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + '</w>' while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) if word == '\n  </w>': word = '\n</w>' self.cache[token] = word return word def _tokenize(self, text, lang='en', bypass_tokenizer=False): lang = self.src_lang if self.do_lower_case: text = text.lower() if bypass_tokenizer: text = text.split() else: text = self.moses_pipeline(text, lang=lang) text = self.moses_tokenize(text, lang=lang) split_tokens = [] for token in text: if token: split_tokens.extend(list(self.bpe(token).split(' '))) return split_tokens def _convert_token_to_id(self, token): return self.encoder.get(token, 
self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): tokens = [t.replace(' ', '').replace('</w>', ' ') for t in tokens] tokens = ''.join(tokens).split() text = self.moses_detokenize(tokens, self.tgt_lang) return text def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] if token_ids_1 is None: return token_ids_0 + sep return token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [0] * len(token_ids_0) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] if token_ids_1 is None: return len(token_ids_0 + sep) * [0] return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return src_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['src_vocab_file']) tgt_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['tgt_vocab_file']) merges_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(src_vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') with open(tgt_vocab_file, 'w', encoding='utf-8') as f: tgt_vocab = {v: k for (k, v) in self.decoder.items()} f.write(json.dumps(tgt_vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merges_file, 'w', encoding='utf-8') as writer: for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (src_vocab_file, tgt_vocab_file, merges_file) def __getstate__(self): state = self.__dict__.copy() state['sm'] = None return state def __setstate__(self, d): self.__dict__ = d try: import sacremoses except ImportError: raise ImportError('You need to install sacremoses to use FSMTTokenizer. 
See https://pypi.org/project/sacremoses/ for installation.') self.sm = sacremoses # File: transformers-main/src/transformers/models/funnel/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_funnel': ['FunnelConfig'], 'convert_funnel_original_tf_checkpoint_to_pytorch': [], 'tokenization_funnel': ['FunnelTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_funnel'] = ['FunnelBaseModel', 'FunnelForMaskedLM', 'FunnelForMultipleChoice', 'FunnelForPreTraining', 'FunnelForQuestionAnswering', 'FunnelForSequenceClassification', 'FunnelForTokenClassification', 'FunnelModel', 'FunnelPreTrainedModel', 'load_tf_weights_in_funnel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_funnel'] = ['TFFunnelBaseModel', 'TFFunnelForMaskedLM', 'TFFunnelForMultipleChoice', 'TFFunnelForPreTraining', 'TFFunnelForQuestionAnswering', 'TFFunnelForSequenceClassification', 'TFFunnelForTokenClassification', 'TFFunnelModel', 'TFFunnelPreTrainedModel'] if TYPE_CHECKING: from .configuration_funnel import FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/funnel/configuration_funnel.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class FunnelConfig(PretrainedConfig): model_type = 'funnel' attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'n_head'} def __init__(self, vocab_size=30522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3072, hidden_act='gelu_new', hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-09, pooling_type='mean', attention_type='relative_shift', separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs): self.vocab_size = vocab_size self.block_sizes = block_sizes self.block_repeats = [1] 
* len(block_sizes) if block_repeats is None else block_repeats assert len(block_sizes) == len(self.block_repeats), '`block_sizes` and `block_repeats` should have the same length.' self.num_decoder_layers = num_decoder_layers self.d_model = d_model self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.initializer_range = initializer_range self.initializer_std = initializer_std self.layer_norm_eps = layer_norm_eps assert pooling_type in ['mean', 'max'], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported." self.pooling_type = pooling_type assert attention_type in ['relative_shift', 'factorized'], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported." self.attention_type = attention_type self.separate_cls = separate_cls self.truncate_seq = truncate_seq self.pool_q_only = pool_q_only super().__init__(**kwargs) @property def num_hidden_layers(self): return sum(self.block_sizes) @num_hidden_layers.setter def num_hidden_layers(self, value): raise NotImplementedError('This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.') @property def num_blocks(self): return len(self.block_sizes) @num_blocks.setter def num_blocks(self, value): raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.') # File: transformers-main/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py """""" import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model): config = FunnelConfig.from_json_file(config_file) print(f'Building PyTorch model from configuration: {config}') model = FunnelBaseModel(config) if base_model else FunnelModel(config) load_tf_weights_in_funnel(model, config, tf_checkpoint_path) print(f'Save PyTorch model to {pytorch_dump_path}') torch.save(model.state_dict(), pytorch_dump_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.') parser.add_argument('--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained model. 
\nThis specifies the model architecture.') parser.add_argument('--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') parser.add_argument('--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.') args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model) # File: transformers-main/src/transformers/models/funnel/modeling_funnel.py """""" import os from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_funnel import FunnelConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'FunnelConfig' _CHECKPOINT_FOR_DOC = 'funnel-transformer/small' INF = 1000000.0 def load_tf_weights_in_funnel(model, config, tf_checkpoint_path): try: import re import numpy as np import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}') array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) _layer_map = {'k': 'k_head', 'q': 'q_head', 'v': 'v_head', 'o': 'post_proj', 'layer_1': 'linear_1', 'layer_2': 'linear_2', 'rel_attn': 'attention', 'ff': 'ffn', 'kernel': 'weight', 'gamma': 'weight', 'beta': 'bias', 'lookup_table': 'weight', 'word_embedding': 'word_embeddings', 'input': 'embeddings'} for (name, array) in zip(names, arrays): name = name.split('/') if any((n in ['adam_v', 'adam_m', 'AdamWeightDecayOptimizer', 'AdamWeightDecayOptimizer_1', 'global_step'] for n in name)): logger.info(f"Skipping {'/'.join(name)}") continue if name[0] == 'generator': continue pointer = model skipped = False for m_name in name[1:]: if not isinstance(pointer, FunnelPositionwiseFFN) and re.fullmatch('layer_\\d+', m_name): layer_index = int(re.search('layer_(\\d+)', m_name).groups()[0]) if layer_index < config.num_hidden_layers: block_idx = 0 while layer_index >= config.block_sizes[block_idx]: layer_index -= config.block_sizes[block_idx] block_idx += 1 pointer = pointer.blocks[block_idx][layer_index] else: layer_index -= config.num_hidden_layers pointer = pointer.layers[layer_index] elif m_name == 'r' and isinstance(pointer, FunnelRelMultiheadAttention): pointer = pointer.r_kernel break elif m_name in _layer_map: pointer = getattr(pointer, _layer_map[m_name]) else: try: pointer = getattr(pointer, m_name) except AttributeError: print(f"Skipping {'/'.join(name)}", array.shape) skipped = True break if not skipped: if len(pointer.shape) != len(array.shape): array = array.reshape(pointer.shape) if m_name == 'kernel': array = np.transpose(array) 
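# TensorFlow stores dense kernels as (in_features, out_features) while torch.nn.Linear expects
# (out_features, in_features), hence the transpose above before the weights are copied into
# `pointer.data`.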
pointer.data = torch.from_numpy(array) return model class FunnelEmbeddings(nn.Module): def __init__(self, config: FunnelConfig) -> None: super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) def forward(self, input_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None) -> torch.Tensor: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) embeddings = self.layer_norm(inputs_embeds) embeddings = self.dropout(embeddings) return embeddings class FunnelAttentionStructure(nn.Module): cls_token_type_id: int = 2 def __init__(self, config: FunnelConfig) -> None: super().__init__() self.config = config self.sin_dropout = nn.Dropout(config.hidden_dropout) self.cos_dropout = nn.Dropout(config.hidden_dropout) self.pooling_mult = None def init_attention_inputs(self, inputs_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor]: self.pooling_mult = 1 self.seq_len = seq_len = inputs_embeds.size(1) position_embeds = self.get_position_embeds(seq_len, inputs_embeds.dtype, inputs_embeds.device) token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None cls_mask = nn.functional.pad(inputs_embeds.new_ones([seq_len - 1, seq_len - 1]), (1, 0, 1, 0)) if self.config.separate_cls else None return (position_embeds, token_type_mat, attention_mask, cls_mask) def token_type_ids_to_mat(self, token_type_ids: torch.Tensor) -> torch.Tensor: token_type_mat = token_type_ids[:, :, None] == token_type_ids[:, None] cls_ids = token_type_ids == self.cls_token_type_id cls_mat = cls_ids[:, :, None] | cls_ids[:, None] return cls_mat | token_type_mat def get_position_embeds(self, seq_len: int, dtype: torch.dtype, device: torch.device) -> Union[Tuple[torch.Tensor], List[List[torch.Tensor]]]: d_model = self.config.d_model if self.config.attention_type == 'factorized': pos_seq = torch.arange(0, seq_len, 1.0, dtype=torch.int64, device=device).to(dtype) freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=torch.int64, device=device).to(dtype) inv_freq = 1 / 10000 ** (freq_seq / (d_model // 2)) sinusoid = pos_seq[:, None] * inv_freq[None] sin_embed = torch.sin(sinusoid) sin_embed_d = self.sin_dropout(sin_embed) cos_embed = torch.cos(sinusoid) cos_embed_d = self.cos_dropout(cos_embed) phi = torch.cat([sin_embed_d, sin_embed_d], dim=-1) psi = torch.cat([cos_embed, sin_embed], dim=-1) pi = torch.cat([cos_embed_d, cos_embed_d], dim=-1) omega = torch.cat([-sin_embed, cos_embed], dim=-1) return (phi, pi, psi, omega) else: freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=torch.int64, device=device).to(dtype) inv_freq = 1 / 10000 ** (freq_seq / (d_model // 2)) rel_pos_id = torch.arange(-seq_len * 2, seq_len * 2, 1.0, dtype=torch.int64, device=device).to(dtype) zero_offset = seq_len * 2 sinusoid = rel_pos_id[:, None] * inv_freq[None] sin_embed = self.sin_dropout(torch.sin(sinusoid)) cos_embed = self.cos_dropout(torch.cos(sinusoid)) pos_embed = torch.cat([sin_embed, cos_embed], dim=-1) pos = torch.arange(0, seq_len, dtype=torch.int64, device=device).to(dtype) pooled_pos = pos position_embeds_list = [] for block_index in range(0, self.config.num_blocks): if block_index == 0: position_embeds_pooling = None else: pooled_pos = self.stride_pool_pos(pos, block_index) stride = 2 ** (block_index - 1) 
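# For block_index > 0 this branch builds the position embeddings used while queries are pooled:
# relative_pos(pos, stride, pooled_pos, shift=2) enumerates the candidate relative distances between the
# pooled query positions and the un-pooled key positions, and adding zero_offset (= 2 * seq_len) below
# turns those distances into row indices of pos_embed, whose rows span relative positions
# -2*seq_len .. 2*seq_len - 1.
# Small worked example (illustrative, assuming separate_cls=False so the [CLS] handling drops out):
# seq_len=8 and block_index=1 give pos=[0..7], pooled_pos=[0, 2, 4, 6] and stride=1, so relative_pos
# returns the distances 8, 7, ..., -7.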
rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2) rel_pos = rel_pos[:, None] + zero_offset rel_pos = rel_pos.expand(rel_pos.size(0), d_model) position_embeds_pooling = torch.gather(pos_embed, 0, rel_pos) pos = pooled_pos stride = 2 ** block_index rel_pos = self.relative_pos(pos, stride) rel_pos = rel_pos[:, None] + zero_offset rel_pos = rel_pos.expand(rel_pos.size(0), d_model) position_embeds_no_pooling = torch.gather(pos_embed, 0, rel_pos) position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling]) return position_embeds_list def stride_pool_pos(self, pos_id: torch.Tensor, block_index: int): if self.config.separate_cls: cls_pos = pos_id.new_tensor([-2 ** block_index + 1]) pooled_pos_id = pos_id[1:-1] if self.config.truncate_seq else pos_id[1:] return torch.cat([cls_pos, pooled_pos_id[::2]], 0) else: return pos_id[::2] def relative_pos(self, pos: torch.Tensor, stride: int, pooled_pos=None, shift: int=1) -> torch.Tensor: if pooled_pos is None: pooled_pos = pos ref_point = pooled_pos[0] - pos[0] num_remove = shift * len(pooled_pos) max_dist = ref_point + num_remove * stride min_dist = pooled_pos[0] - pos[-1] return torch.arange(max_dist, min_dist - 1, -stride, dtype=torch.long, device=pos.device) def stride_pool(self, tensor: Union[torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor]], axis: Union[int, Tuple[int], List[int]]) -> torch.Tensor: if tensor is None: return None if isinstance(axis, (list, tuple)): for ax in axis: tensor = self.stride_pool(tensor, ax) return tensor if isinstance(tensor, (tuple, list)): return type(tensor)((self.stride_pool(x, axis) for x in tensor)) axis %= tensor.ndim axis_slice = slice(None, -1, 2) if self.config.separate_cls and self.config.truncate_seq else slice(None, None, 2) enc_slice = [slice(None)] * axis + [axis_slice] if self.config.separate_cls: cls_slice = [slice(None)] * axis + [slice(None, 1)] tensor = torch.cat([tensor[cls_slice], tensor], axis=axis) return tensor[enc_slice] def pool_tensor(self, tensor: Union[torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor]], mode: str='mean', stride: int=2) -> torch.Tensor: if tensor is None: return None if isinstance(tensor, (tuple, list)): return type(tensor)((self.pool_tensor(tensor, mode=mode, stride=stride) for x in tensor)) if self.config.separate_cls: suffix = tensor[:, :-1] if self.config.truncate_seq else tensor tensor = torch.cat([tensor[:, :1], suffix], dim=1) ndim = tensor.ndim if ndim == 2: tensor = tensor[:, None, :, None] elif ndim == 3: tensor = tensor[:, None, :, :] stride = (stride, 1) if mode == 'mean': tensor = nn.functional.avg_pool2d(tensor, stride, stride=stride, ceil_mode=True) elif mode == 'max': tensor = nn.functional.max_pool2d(tensor, stride, stride=stride, ceil_mode=True) elif mode == 'min': tensor = -nn.functional.max_pool2d(-tensor, stride, stride=stride, ceil_mode=True) else: raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.") if ndim == 2: return tensor[:, 0, :, 0] elif ndim == 3: return tensor[:, 0] return tensor def pre_attention_pooling(self, output, attention_inputs: Tuple[torch.Tensor]) -> Tuple[torch.Tensor, Tuple[torch.Tensor]]: (position_embeds, token_type_mat, attention_mask, cls_mask) = attention_inputs if self.config.pool_q_only: if self.config.attention_type == 'factorized': position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:] token_type_mat = self.stride_pool(token_type_mat, 1) cls_mask = self.stride_pool(cls_mask, 0) output = self.pool_tensor(output, 
mode=self.config.pooling_type) else: self.pooling_mult *= 2 if self.config.attention_type == 'factorized': position_embeds = self.stride_pool(position_embeds, 0) token_type_mat = self.stride_pool(token_type_mat, [1, 2]) cls_mask = self.stride_pool(cls_mask, [1, 2]) attention_mask = self.pool_tensor(attention_mask, mode='min') output = self.pool_tensor(output, mode=self.config.pooling_type) attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask) return (output, attention_inputs) def post_attention_pooling(self, attention_inputs: Tuple[torch.Tensor]) -> Tuple[torch.Tensor]: (position_embeds, token_type_mat, attention_mask, cls_mask) = attention_inputs if self.config.pool_q_only: self.pooling_mult *= 2 if self.config.attention_type == 'factorized': position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0) token_type_mat = self.stride_pool(token_type_mat, 2) cls_mask = self.stride_pool(cls_mask, 1) attention_mask = self.pool_tensor(attention_mask, mode='min') attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask) return attention_inputs def _relative_shift_gather(positional_attn: torch.Tensor, context_len: int, shift: int) -> torch.Tensor: (batch_size, n_head, seq_len, max_rel_len) = positional_attn.shape positional_attn = torch.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len]) positional_attn = positional_attn[:, :, shift:, :] positional_attn = torch.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift]) positional_attn = positional_attn[..., :context_len] return positional_attn class FunnelRelMultiheadAttention(nn.Module): def __init__(self, config: FunnelConfig, block_index: int) -> None: super().__init__() self.config = config self.block_index = block_index (d_model, n_head, d_head) = (config.d_model, config.n_head, config.d_head) self.hidden_dropout = nn.Dropout(config.hidden_dropout) self.attention_dropout = nn.Dropout(config.attention_dropout) self.q_head = nn.Linear(d_model, n_head * d_head, bias=False) self.k_head = nn.Linear(d_model, n_head * d_head) self.v_head = nn.Linear(d_model, n_head * d_head) self.r_w_bias = nn.Parameter(torch.zeros([n_head, d_head])) self.r_r_bias = nn.Parameter(torch.zeros([n_head, d_head])) self.r_kernel = nn.Parameter(torch.zeros([d_model, n_head, d_head])) self.r_s_bias = nn.Parameter(torch.zeros([n_head, d_head])) self.seg_embed = nn.Parameter(torch.zeros([2, n_head, d_head])) self.post_proj = nn.Linear(n_head * d_head, d_model) self.layer_norm = nn.LayerNorm(d_model, eps=config.layer_norm_eps) self.scale = 1.0 / d_head ** 0.5 def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None): if self.config.attention_type == 'factorized': (phi, pi, psi, omega) = position_embeds u = self.r_r_bias * self.scale w_r = self.r_kernel q_r_attention = torch.einsum('binh,dnh->bind', q_head + u, w_r) q_r_attention_1 = q_r_attention * phi[:, None] q_r_attention_2 = q_r_attention * pi[:, None] positional_attn = torch.einsum('bind,jd->bnij', q_r_attention_1, psi) + torch.einsum('bind,jd->bnij', q_r_attention_2, omega) else: shift = 2 if q_head.shape[1] != context_len else 1 r = position_embeds[self.block_index][shift - 1] v = self.r_r_bias * self.scale w_r = self.r_kernel r_head = torch.einsum('td,dnh->tnh', r, w_r) positional_attn = torch.einsum('binh,tnh->bnit', q_head + v, r_head) positional_attn = _relative_shift_gather(positional_attn, context_len, shift) if cls_mask is not None: positional_attn *= cls_mask return 
positional_attn def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None): if token_type_mat is None: return 0 (batch_size, seq_len, context_len) = token_type_mat.shape r_s_bias = self.r_s_bias * self.scale token_type_bias = torch.einsum('bind,snd->bnis', q_head + r_s_bias, self.seg_embed) token_type_mat = token_type_mat[:, None].expand([batch_size, q_head.shape[2], seq_len, context_len]) (diff_token_type, same_token_type) = torch.split(token_type_bias, 1, dim=-1) token_type_attn = torch.where(token_type_mat, same_token_type.expand(token_type_mat.shape), diff_token_type.expand(token_type_mat.shape)) if cls_mask is not None: token_type_attn *= cls_mask return token_type_attn def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_inputs: Tuple[torch.Tensor], output_attentions: bool=False) -> Tuple[torch.Tensor, ...]: (position_embeds, token_type_mat, attention_mask, cls_mask) = attention_inputs (batch_size, seq_len, _) = query.shape context_len = key.shape[1] (n_head, d_head) = (self.config.n_head, self.config.d_head) q_head = self.q_head(query).view(batch_size, seq_len, n_head, d_head) k_head = self.k_head(key).view(batch_size, context_len, n_head, d_head) v_head = self.v_head(value).view(batch_size, context_len, n_head, d_head) q_head = q_head * self.scale r_w_bias = self.r_w_bias * self.scale content_score = torch.einsum('bind,bjnd->bnij', q_head + r_w_bias, k_head) positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask) token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask) attn_score = content_score + positional_attn + token_type_attn dtype = attn_score.dtype attn_score = attn_score.float() if attention_mask is not None: attn_score = attn_score - INF * (1 - attention_mask[:, None, None].float()) attn_prob = torch.softmax(attn_score, dim=-1, dtype=dtype) attn_prob = self.attention_dropout(attn_prob) attn_vec = torch.einsum('bnij,bjnd->bind', attn_prob, v_head) attn_out = self.post_proj(attn_vec.reshape(batch_size, seq_len, n_head * d_head)) attn_out = self.hidden_dropout(attn_out) output = self.layer_norm(query + attn_out) return (output, attn_prob) if output_attentions else (output,) class FunnelPositionwiseFFN(nn.Module): def __init__(self, config: FunnelConfig) -> None: super().__init__() self.linear_1 = nn.Linear(config.d_model, config.d_inner) self.activation_function = ACT2FN[config.hidden_act] self.activation_dropout = nn.Dropout(config.activation_dropout) self.linear_2 = nn.Linear(config.d_inner, config.d_model) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps) def forward(self, hidden: torch.Tensor) -> torch.Tensor: h = self.linear_1(hidden) h = self.activation_function(h) h = self.activation_dropout(h) h = self.linear_2(h) h = self.dropout(h) return self.layer_norm(hidden + h) class FunnelLayer(nn.Module): def __init__(self, config: FunnelConfig, block_index: int) -> None: super().__init__() self.attention = FunnelRelMultiheadAttention(config, block_index) self.ffn = FunnelPositionwiseFFN(config) def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_inputs, output_attentions: bool=False) -> Tuple: attn = self.attention(query, key, value, attention_inputs, output_attentions=output_attentions) output = self.ffn(attn[0]) return (output, attn[1]) if output_attentions else (output,) class FunnelEncoder(nn.Module): def __init__(self, config: 
FunnelConfig) -> None: super().__init__() self.config = config self.attention_structure = FunnelAttentionStructure(config) self.blocks = nn.ModuleList([nn.ModuleList([FunnelLayer(config, block_index) for _ in range(block_size)]) for (block_index, block_size) in enumerate(config.block_sizes)]) def forward(self, inputs_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[Tuple, BaseModelOutput]: attention_mask = attention_mask.type_as(inputs_embeds) attention_inputs = self.attention_structure.init_attention_inputs(inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids) hidden = inputs_embeds all_hidden_states = (inputs_embeds,) if output_hidden_states else None all_attentions = () if output_attentions else None for (block_index, block) in enumerate(self.blocks): pooling_flag = hidden.size(1) > (2 if self.config.separate_cls else 1) pooling_flag = pooling_flag and block_index > 0 if pooling_flag: (pooled_hidden, attention_inputs) = self.attention_structure.pre_attention_pooling(hidden, attention_inputs) for (layer_index, layer) in enumerate(block): for repeat_index in range(self.config.block_repeats[block_index]): do_pooling = repeat_index == 0 and layer_index == 0 and pooling_flag if do_pooling: query = pooled_hidden key = value = hidden if self.config.pool_q_only else pooled_hidden else: query = key = value = hidden layer_output = layer(query, key, value, attention_inputs, output_attentions=output_attentions) hidden = layer_output[0] if do_pooling: attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs) if output_attentions: all_attentions = all_attentions + layer_output[1:] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden,) if not return_dict: return tuple((v for v in [hidden, all_hidden_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions) def upsample(x: torch.Tensor, stride: int, target_len: int, separate_cls: bool=True, truncate_seq: bool=False) -> torch.Tensor: if stride == 1: return x if separate_cls: cls = x[:, :1] x = x[:, 1:] output = torch.repeat_interleave(x, repeats=stride, dim=1) if separate_cls: if truncate_seq: output = nn.functional.pad(output, (0, 0, 0, stride - 1, 0, 0)) output = output[:, :target_len - 1] output = torch.cat([cls, output], dim=1) else: output = output[:, :target_len] return output class FunnelDecoder(nn.Module): def __init__(self, config: FunnelConfig) -> None: super().__init__() self.config = config self.attention_structure = FunnelAttentionStructure(config) self.layers = nn.ModuleList([FunnelLayer(config, 0) for _ in range(config.num_decoder_layers)]) def forward(self, final_hidden: torch.Tensor, first_block_hidden: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[Tuple, BaseModelOutput]: upsampled_hidden = upsample(final_hidden, stride=2 ** (len(self.config.block_sizes) - 1), target_len=first_block_hidden.shape[1], separate_cls=self.config.separate_cls, truncate_seq=self.config.truncate_seq) hidden = upsampled_hidden + first_block_hidden all_hidden_states = (hidden,) if output_hidden_states else None all_attentions = () if output_attentions else None attention_inputs = 
self.attention_structure.init_attention_inputs(hidden, attention_mask=attention_mask, token_type_ids=token_type_ids) for layer in self.layers: layer_output = layer(hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions) hidden = layer_output[0] if output_attentions: all_attentions = all_attentions + layer_output[1:] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden,) if not return_dict: return tuple((v for v in [hidden, all_hidden_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions) class FunnelDiscriminatorPredictions(nn.Module): def __init__(self, config: FunnelConfig) -> None: super().__init__() self.config = config self.dense = nn.Linear(config.d_model, config.d_model) self.dense_prediction = nn.Linear(config.d_model, 1) def forward(self, discriminator_hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(discriminator_hidden_states) hidden_states = ACT2FN[self.config.hidden_act](hidden_states) logits = self.dense_prediction(hidden_states).squeeze(-1) return logits class FunnelPreTrainedModel(PreTrainedModel): config_class = FunnelConfig load_tf_weights = load_tf_weights_in_funnel base_model_prefix = 'funnel' def _init_weights(self, module): classname = module.__class__.__name__ if classname.find('Linear') != -1: if getattr(module, 'weight', None) is not None: if self.config.initializer_std is None: (fan_out, fan_in) = module.weight.shape std = np.sqrt(1.0 / float(fan_in + fan_out)) else: std = self.config.initializer_std nn.init.normal_(module.weight, std=std) if getattr(module, 'bias', None) is not None: nn.init.constant_(module.bias, 0.0) elif classname == 'FunnelRelMultiheadAttention': nn.init.uniform_(module.r_w_bias, b=self.config.initializer_range) nn.init.uniform_(module.r_r_bias, b=self.config.initializer_range) nn.init.uniform_(module.r_kernel, b=self.config.initializer_range) nn.init.uniform_(module.r_s_bias, b=self.config.initializer_range) nn.init.uniform_(module.seg_embed, b=self.config.initializer_range) elif classname == 'FunnelEmbeddings': std = 1.0 if self.config.initializer_std is None else self.config.initializer_std nn.init.normal_(module.word_embeddings.weight, std=std) if module.word_embeddings.padding_idx is not None: module.word_embeddings.weight.data[module.padding_idx].zero_() class FunnelClassificationHead(nn.Module): def __init__(self, config: FunnelConfig, n_labels: int) -> None: super().__init__() self.linear_hidden = nn.Linear(config.d_model, config.d_model) self.dropout = nn.Dropout(config.hidden_dropout) self.linear_out = nn.Linear(config.d_model, n_labels) def forward(self, hidden: torch.Tensor) -> torch.Tensor: hidden = self.linear_hidden(hidden) hidden = torch.tanh(hidden) hidden = self.dropout(hidden) return self.linear_out(hidden) @dataclass class FunnelForPreTrainingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None FUNNEL_START_DOCSTRING = '\n\n The Funnel Transformer model was proposed in [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient\n Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.\n\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`FunnelConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' FUNNEL_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('\n The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called\n decoder) or any task-specific head on top.\n ', FUNNEL_START_DOCSTRING) class FunnelBaseModel(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.embeddings = FunnelEmbeddings(config) self.encoder = FunnelEncoder(config) self.post_init() def get_input_embeddings(self) -> nn.Embedding: return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None: self.embeddings.word_embeddings = new_embeddings @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='funnel-transformer/small-base', output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) inputs_embeds = self.embeddings(input_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return encoder_outputs @add_start_docstrings('The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top.', FUNNEL_START_DOCSTRING) class FunnelModel(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.config = config self.embeddings = FunnelEmbeddings(config) self.encoder = FunnelEncoder(config) self.decoder = FunnelDecoder(config) self.post_init() def get_input_embeddings(self) -> nn.Embedding: return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None: self.embeddings.word_embeddings = new_embeddings @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) 
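# Note on the forward pass below: the encoder is always run with output_hidden_states=True so that
# encoder_outputs[1][self.config.block_sizes[0]] -- with the default block_repeats, the hidden state
# right after the last layer of the first, never-pooled block -- can be passed to FunnelDecoder, which
# up-samples the final pooled hidden state back to full length and adds this first-block state as a
# skip connection before running its own layers.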
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) inputs_embeds = self.embeddings(input_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict) decoder_outputs = self.decoder(final_hidden=encoder_outputs[0], first_block_hidden=encoder_outputs[1][self.config.block_sizes[0]], attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: idx = 0 outputs = (decoder_outputs[0],) if output_hidden_states: idx += 1 outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],) if output_attentions: idx += 1 outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],) return outputs return BaseModelOutput(last_hidden_state=decoder_outputs[0], hidden_states=encoder_outputs.hidden_states + decoder_outputs.hidden_states if output_hidden_states else None, attentions=encoder_outputs.attentions + decoder_outputs.attentions if output_attentions else None) @add_start_docstrings('\n Funnel Transformer model with a binary classification head on top as used during pretraining for identifying\n generated tokens.\n ', FUNNEL_START_DOCSTRING) class FunnelForPreTraining(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.funnel = FunnelModel(config) self.discriminator_predictions = FunnelDiscriminatorPredictions(config) self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=FunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> 
Union[Tuple, FunnelForPreTrainingOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict discriminator_hidden_states = self.funnel(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) discriminator_sequence_output = discriminator_hidden_states[0] logits = self.discriminator_predictions(discriminator_sequence_output) loss = None if labels is not None: loss_fct = nn.BCEWithLogitsLoss() if attention_mask is not None: active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1 active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss] active_labels = labels[active_loss] loss = loss_fct(active_logits, active_labels.float()) else: loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float()) if not return_dict: output = (logits,) + discriminator_hidden_states[1:] return (loss,) + output if loss is not None else output return FunnelForPreTrainingOutput(loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions) @add_start_docstrings('Funnel Transformer Model with a `language modeling` head on top.', FUNNEL_START_DOCSTRING) class FunnelForMaskedLM(FunnelPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.funnel = FunnelModel(config) self.lm_head = nn.Linear(config.d_model, config.vocab_size) self.post_init() def get_output_embeddings(self) -> nn.Linear: return self.lm_head def set_output_embeddings(self, new_embeddings: nn.Embedding) -> None: self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='') def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MaskedLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = outputs[0] prediction_logits = self.lm_head(last_hidden_state) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_logits,) + outputs[1:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Funnel Transformer Model with a sequence classification/regression head on top (two linear layer on top of the\n first timestep of the last hidden state) e.g. 
for GLUE tasks.\n ', FUNNEL_START_DOCSTRING) class FunnelForSequenceClassification(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.config = config self.funnel = FunnelBaseModel(config) self.classifier = FunnelClassificationHead(config, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='funnel-transformer/small-base', output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = outputs[0] pooled_output = last_hidden_state[:, 0] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Funnel Transformer Model with a multiple choice classification head on top (two linear layer on top of the first\n timestep of the last hidden state, and a softmax) e.g. 
for RocStories/SWAG tasks.\n ', FUNNEL_START_DOCSTRING) class FunnelForMultipleChoice(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.funnel = FunnelBaseModel(config) self.classifier = FunnelClassificationHead(config, 1) self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint='funnel-transformer/small-base', output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, MultipleChoiceModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.funnel(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = outputs[0] pooled_output = last_hidden_state[:, 0] logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Funnel Transformer Model with a token classification head on top (a linear layer on top of the hidden-states\n output) e.g. 
for Named-Entity-Recognition (NER) tasks.\n ', FUNNEL_START_DOCSTRING) class FunnelForTokenClassification(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.funnel = FunnelModel(config) self.dropout = nn.Dropout(config.hidden_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = outputs[0] last_hidden_state = self.dropout(last_hidden_state) logits = self.classifier(last_hidden_state) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Funnel Transformer Model with a span classification head on top for extractive question-answering tasks like SQuAD\n (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', FUNNEL_START_DOCSTRING) class FunnelForQuestionAnswering(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.funnel = FunnelModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = outputs[0] logits = self.qa_outputs(last_hidden_state) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = 
start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/funnel/modeling_tf_funnel.py """""" from __future__ import annotations import warnings from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput from ...modeling_tf_utils import TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_funnel import FunnelConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'FunnelConfig' INF = 1000000.0 class TFFunnelEmbeddings(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.initializer_std = 1.0 if config.initializer_std is None else config.initializer_std self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm') self.dropout = keras.layers.Dropout(rate=config.hidden_dropout) def build(self, input_shape=None): with tf.name_scope('word_embeddings'): self.weight = self.add_weight(name='weight', shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_std)) if self.built: return self.built = True if getattr(self, 'LayerNorm', None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.d_model]) def call(self, input_ids=None, inputs_embeds=None, training=False): assert not (input_ids is None and inputs_embeds is None) assert not (input_ids is not None and inputs_embeds is not None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(self.weight, input_ids) final_embeddings = self.LayerNorm(inputs=inputs_embeds) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFFunnelAttentionStructure: cls_token_type_id: int = 2 def __init__(self, config): self.d_model = 
config.d_model self.attention_type = config.attention_type self.num_blocks = config.num_blocks self.separate_cls = config.separate_cls self.truncate_seq = config.truncate_seq self.pool_q_only = config.pool_q_only self.pooling_type = config.pooling_type self.sin_dropout = keras.layers.Dropout(config.hidden_dropout) self.cos_dropout = keras.layers.Dropout(config.hidden_dropout) self.pooling_mult = None def init_attention_inputs(self, inputs_embeds, attention_mask=None, token_type_ids=None, training=False): self.pooling_mult = 1 self.seq_len = seq_len = shape_list(inputs_embeds)[1] position_embeds = self.get_position_embeds(seq_len, training=training) token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None cls_mask = tf.pad(tf.ones([seq_len - 1, seq_len - 1], dtype=inputs_embeds.dtype), [[1, 0], [1, 0]]) if self.separate_cls else None return (position_embeds, token_type_mat, attention_mask, cls_mask) def token_type_ids_to_mat(self, token_type_ids): token_type_mat = tf.equal(tf.expand_dims(token_type_ids, -1), tf.expand_dims(token_type_ids, -2)) cls_ids = tf.equal(token_type_ids, tf.constant([self.cls_token_type_id], dtype=token_type_ids.dtype)) cls_mat = tf.logical_or(tf.expand_dims(cls_ids, -1), tf.expand_dims(cls_ids, -2)) return tf.logical_or(cls_mat, token_type_mat) def get_position_embeds(self, seq_len, training=False): if self.attention_type == 'factorized': pos_seq = tf.range(0, seq_len, 1.0) freq_seq = tf.range(0, self.d_model // 2, 1.0) inv_freq = 1 / 10000 ** (freq_seq / (self.d_model // 2)) sinusoid = tf.einsum('i,d->id', pos_seq, inv_freq) sin_embed = tf.sin(sinusoid) sin_embed_d = self.sin_dropout(sin_embed, training=training) cos_embed = tf.cos(sinusoid) cos_embed_d = self.cos_dropout(cos_embed, training=training) phi = tf.concat([sin_embed_d, sin_embed_d], axis=-1) psi = tf.concat([cos_embed, sin_embed], axis=-1) pi = tf.concat([cos_embed_d, cos_embed_d], axis=-1) omega = tf.concat([-sin_embed, cos_embed], axis=-1) return (phi, pi, psi, omega) else: freq_seq = tf.range(0, self.d_model // 2, 1.0) inv_freq = 1 / 10000 ** (freq_seq / (self.d_model // 2)) rel_pos_id = tf.range(-seq_len * 2, seq_len * 2, 1.0) zero_offset = seq_len * tf.constant(2) sinusoid = tf.einsum('i,d->id', rel_pos_id, inv_freq) sin_embed = self.sin_dropout(tf.sin(sinusoid), training=training) cos_embed = self.cos_dropout(tf.cos(sinusoid), training=training) pos_embed = tf.concat([sin_embed, cos_embed], axis=-1) pos = tf.range(0, seq_len) pooled_pos = pos position_embeds_list = [] for block_index in range(0, self.num_blocks): position_embeds_pooling = tf.fill([1], value=-1.0) if block_index != 0: pooled_pos = self.stride_pool_pos(pos, block_index) stride = 2 ** (block_index - 1) rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2) rel_pos = tf.cast(rel_pos, dtype=zero_offset.dtype) rel_pos = rel_pos + zero_offset position_embeds_pooling = tf.gather(pos_embed, rel_pos, axis=0) pos = pooled_pos stride = 2 ** block_index rel_pos = self.relative_pos(pos, stride) rel_pos = tf.cast(rel_pos, dtype=zero_offset.dtype) rel_pos = rel_pos + zero_offset tf.debugging.assert_less(rel_pos, tf.shape(pos_embed)[0]) position_embeds_no_pooling = tf.gather(pos_embed, rel_pos, axis=0) position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling]) return position_embeds_list def stride_pool_pos(self, pos_id, block_index): if self.separate_cls: cls_pos = tf.constant([-2 ** block_index + 1], dtype=pos_id.dtype) pooled_pos_id = pos_id[1:-1] if self.truncate_seq 
else pos_id[1:] return tf.concat([cls_pos, pooled_pos_id[::2]], 0) else: return pos_id[::2] def relative_pos(self, pos, stride, pooled_pos=None, shift=1): if pooled_pos is None: pooled_pos = pos ref_point = pooled_pos[0] - pos[0] num_remove = shift * shape_list(pooled_pos)[0] max_dist = ref_point + num_remove * stride min_dist = pooled_pos[0] - pos[-1] return tf.range(max_dist, min_dist - 1, -stride) def stride_pool(self, tensor, axis): if tensor is None: return None if isinstance(axis, (list, tuple)): for ax in axis: tensor = self.stride_pool(tensor, ax) return tensor if isinstance(tensor, (tuple, list)): return type(tensor)((self.stride_pool(x, axis) for x in tensor)) axis %= len(shape_list(tensor)) axis_slice = slice(None, -1, 2) if self.separate_cls and self.truncate_seq else slice(None, None, 2) enc_slice = [slice(None)] * axis + [axis_slice] if self.separate_cls: cls_slice = [slice(None)] * axis + [slice(None, 1)] tensor = tf.concat([tensor[cls_slice], tensor], axis) return tensor[enc_slice] def pool_tensor(self, tensor, mode='mean', stride=2): if tensor is None: return None if isinstance(tensor, (tuple, list)): return type(tensor)((self.pool_tensor(tensor, mode=mode, stride=stride) for x in tensor)) if self.separate_cls: suffix = tensor[:, :-1] if self.truncate_seq else tensor tensor = tf.concat([tensor[:, :1], suffix], axis=1) ndim = len(shape_list(tensor)) if ndim == 2: tensor = tensor[:, :, None] if mode == 'mean': tensor = tf.nn.avg_pool1d(tensor, stride, strides=stride, data_format='NWC', padding='SAME') elif mode == 'max': tensor = tf.nn.max_pool1d(tensor, stride, strides=stride, data_format='NWC', padding='SAME') elif mode == 'min': tensor = -tf.nn.max_pool1d(-tensor, stride, strides=stride, data_format='NWC', padding='SAME') else: raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.") return tf.squeeze(tensor, 2) if ndim == 2 else tensor def pre_attention_pooling(self, output, attention_inputs): (position_embeds, token_type_mat, attention_mask, cls_mask) = attention_inputs if self.pool_q_only: if self.attention_type == 'factorized': position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:] token_type_mat = self.stride_pool(token_type_mat, 1) cls_mask = self.stride_pool(cls_mask, 0) output = self.pool_tensor(output, mode=self.pooling_type) else: self.pooling_mult *= 2 if self.attention_type == 'factorized': position_embeds = self.stride_pool(position_embeds, 0) token_type_mat = self.stride_pool(token_type_mat, [1, 2]) cls_mask = self.stride_pool(cls_mask, [1, 2]) attention_mask = self.pool_tensor(attention_mask, mode='min') output = self.pool_tensor(output, mode=self.pooling_type) attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask) return (output, attention_inputs) def post_attention_pooling(self, attention_inputs): (position_embeds, token_type_mat, attention_mask, cls_mask) = attention_inputs if self.pool_q_only: self.pooling_mult *= 2 if self.attention_type == 'factorized': position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0) token_type_mat = self.stride_pool(token_type_mat, 2) cls_mask = self.stride_pool(cls_mask, 1) attention_mask = self.pool_tensor(attention_mask, mode='min') attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask) return attention_inputs def _relative_shift_gather(positional_attn, context_len, shift): (batch_size, n_head, seq_len, max_rel_len) = shape_list(positional_attn) positional_attn = tf.reshape(positional_attn, [batch_size, 
n_head, max_rel_len, seq_len]) positional_attn = positional_attn[:, :, shift:, :] positional_attn = tf.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift]) positional_attn = positional_attn[..., :context_len] return positional_attn class TFFunnelRelMultiheadAttention(keras.layers.Layer): def __init__(self, config, block_index, **kwargs): super().__init__(**kwargs) self.attention_type = config.attention_type self.n_head = n_head = config.n_head self.d_head = d_head = config.d_head self.d_model = d_model = config.d_model self.initializer_range = config.initializer_range self.block_index = block_index self.hidden_dropout = keras.layers.Dropout(config.hidden_dropout) self.attention_dropout = keras.layers.Dropout(config.attention_dropout) initializer = get_initializer(config.initializer_range) self.q_head = keras.layers.Dense(n_head * d_head, use_bias=False, kernel_initializer=initializer, name='q_head') self.k_head = keras.layers.Dense(n_head * d_head, kernel_initializer=initializer, name='k_head') self.v_head = keras.layers.Dense(n_head * d_head, kernel_initializer=initializer, name='v_head') self.post_proj = keras.layers.Dense(d_model, kernel_initializer=initializer, name='post_proj') self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm') self.scale = 1.0 / d_head ** 0.5 def build(self, input_shape=None): (n_head, d_head, d_model) = (self.n_head, self.d_head, self.d_model) initializer = get_initializer(self.initializer_range) self.r_w_bias = self.add_weight(shape=(n_head, d_head), initializer=initializer, trainable=True, name='r_w_bias') self.r_r_bias = self.add_weight(shape=(n_head, d_head), initializer=initializer, trainable=True, name='r_r_bias') self.r_kernel = self.add_weight(shape=(d_model, n_head, d_head), initializer=initializer, trainable=True, name='r_kernel') self.r_s_bias = self.add_weight(shape=(n_head, d_head), initializer=initializer, trainable=True, name='r_s_bias') self.seg_embed = self.add_weight(shape=(2, n_head, d_head), initializer=initializer, trainable=True, name='seg_embed') if self.built: return self.built = True if getattr(self, 'q_head', None) is not None: with tf.name_scope(self.q_head.name): self.q_head.build([None, None, d_model]) if getattr(self, 'k_head', None) is not None: with tf.name_scope(self.k_head.name): self.k_head.build([None, None, d_model]) if getattr(self, 'v_head', None) is not None: with tf.name_scope(self.v_head.name): self.v_head.build([None, None, d_model]) if getattr(self, 'post_proj', None) is not None: with tf.name_scope(self.post_proj.name): self.post_proj.build([None, None, n_head * d_head]) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, d_model]) def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None): if self.attention_type == 'factorized': (phi, pi, psi, omega) = position_embeds u = self.r_r_bias * self.scale w_r = self.r_kernel q_r_attention = tf.einsum('binh,dnh->bind', q_head + u, w_r) q_r_attention_1 = q_r_attention * phi[:, None] q_r_attention_2 = q_r_attention * pi[:, None] positional_attn = tf.einsum('bind,jd->bnij', q_r_attention_1, psi) + tf.einsum('bind,jd->bnij', q_r_attention_2, omega) else: if shape_list(q_head)[1] != context_len: shift = 2 r = position_embeds[self.block_index][1] else: shift = 1 r = position_embeds[self.block_index][0] v = self.r_r_bias * self.scale w_r = self.r_kernel r_head = tf.einsum('td,dnh->tnh', r, w_r) 
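# Shape bookkeeping for the relative-shift branch (n = n_head, h = d_head, d = d_model):
# r is (max_rel_len, d) and w_r is (d, n, h), so r_head above is (max_rel_len, n, h); q_head is
# (batch, seq_len, n, h), so the einsum below produces positional_attn of shape
# (batch, n, seq_len, max_rel_len), which _relative_shift_gather realigns and truncates to
# (batch, n, seq_len, context_len) before it is added to the content scores in call().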
positional_attn = tf.einsum('binh,tnh->bnit', q_head + v, r_head) positional_attn = _relative_shift_gather(positional_attn, context_len, shift) if cls_mask is not None: positional_attn *= cls_mask return positional_attn def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None): if token_type_mat is None: return 0 (batch_size, seq_len, context_len) = shape_list(token_type_mat) r_s_bias = self.r_s_bias * self.scale token_type_bias = tf.einsum('bind,snd->bnis', q_head + r_s_bias, self.seg_embed) token_type_mat = tf.tile(token_type_mat[:, None], [1, shape_list(q_head)[2], 1, 1]) (diff_token_type, same_token_type) = tf.split(token_type_bias, 2, axis=-1) token_type_attn = tf.where(token_type_mat, tf.tile(same_token_type, [1, 1, 1, context_len]), tf.tile(diff_token_type, [1, 1, 1, context_len])) if cls_mask is not None: token_type_attn *= cls_mask return token_type_attn def call(self, query, key, value, attention_inputs, output_attentions=False, training=False): (position_embeds, token_type_mat, attention_mask, cls_mask) = attention_inputs (batch_size, seq_len, _) = shape_list(query) context_len = shape_list(key)[1] (n_head, d_head) = (self.n_head, self.d_head) q_head = tf.reshape(self.q_head(query), [batch_size, seq_len, n_head, d_head]) k_head = tf.reshape(self.k_head(key), [batch_size, context_len, n_head, d_head]) v_head = tf.reshape(self.v_head(value), [batch_size, context_len, n_head, d_head]) q_head = q_head * self.scale r_w_bias = self.r_w_bias * self.scale content_score = tf.einsum('bind,bjnd->bnij', q_head + r_w_bias, k_head) positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask) token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask) attn_score = content_score + positional_attn + token_type_attn if attention_mask is not None: attention_mask = tf.cast(attention_mask, dtype=attn_score.dtype) attn_score = attn_score - INF * (1 - attention_mask[:, None, None]) attn_prob = stable_softmax(attn_score, axis=-1) attn_prob = self.attention_dropout(attn_prob, training=training) attn_vec = tf.einsum('bnij,bjnd->bind', attn_prob, v_head) attn_out = self.post_proj(tf.reshape(attn_vec, [batch_size, seq_len, n_head * d_head])) attn_out = self.hidden_dropout(attn_out, training=training) output = self.layer_norm(query + attn_out) return (output, attn_prob) if output_attentions else (output,) class TFFunnelPositionwiseFFN(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) initializer = get_initializer(config.initializer_range) self.linear_1 = keras.layers.Dense(config.d_inner, kernel_initializer=initializer, name='linear_1') self.activation_function = get_tf_activation(config.hidden_act) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.linear_2 = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name='linear_2') self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm') self.config = config def call(self, hidden, training=False): h = self.linear_1(hidden) h = self.activation_function(h) h = self.activation_dropout(h, training=training) h = self.linear_2(h) h = self.dropout(h, training=training) return self.layer_norm(hidden + h) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'linear_1', None) is not None: with tf.name_scope(self.linear_1.name): self.linear_1.build([None, None, 
self.config.d_model]) if getattr(self, 'linear_2', None) is not None: with tf.name_scope(self.linear_2.name): self.linear_2.build([None, None, self.config.d_inner]) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.d_model]) class TFFunnelLayer(keras.layers.Layer): def __init__(self, config, block_index, **kwargs): super().__init__(**kwargs) self.attention = TFFunnelRelMultiheadAttention(config, block_index, name='attention') self.ffn = TFFunnelPositionwiseFFN(config, name='ffn') def call(self, query, key, value, attention_inputs, output_attentions=False, training=False): attn = self.attention(query, key, value, attention_inputs, output_attentions=output_attentions, training=training) output = self.ffn(attn[0], training=training) return (output, attn[1]) if output_attentions else (output,) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'attention', None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, 'ffn', None) is not None: with tf.name_scope(self.ffn.name): self.ffn.build(None) class TFFunnelEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.separate_cls = config.separate_cls self.pool_q_only = config.pool_q_only self.block_repeats = config.block_repeats self.attention_structure = TFFunnelAttentionStructure(config) self.blocks = [[TFFunnelLayer(config, block_index, name=f'blocks_._{block_index}_._{i}') for i in range(block_size)] for (block_index, block_size) in enumerate(config.block_sizes)] def call(self, inputs_embeds, attention_mask=None, token_type_ids=None, output_attentions=False, output_hidden_states=False, return_dict=True, training=False): attention_inputs = self.attention_structure.init_attention_inputs(inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, training=training) hidden = inputs_embeds all_hidden_states = (inputs_embeds,) if output_hidden_states else None all_attentions = () if output_attentions else None for (block_index, block) in enumerate(self.blocks): pooling_flag = shape_list(hidden)[1] > (2 if self.separate_cls else 1) pooling_flag = pooling_flag and block_index > 0 pooled_hidden = tf.zeros(shape_list(hidden)) if pooling_flag: (pooled_hidden, attention_inputs) = self.attention_structure.pre_attention_pooling(hidden, attention_inputs) for (layer_index, layer) in enumerate(block): for repeat_index in range(self.block_repeats[block_index]): do_pooling = repeat_index == 0 and layer_index == 0 and pooling_flag if do_pooling: query = pooled_hidden key = value = hidden if self.pool_q_only else pooled_hidden else: query = key = value = hidden layer_output = layer(query, key, value, attention_inputs, output_attentions=output_attentions, training=training) hidden = layer_output[0] if do_pooling: attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs) if output_attentions: all_attentions = all_attentions + layer_output[1:] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden,) if not return_dict: return tuple((v for v in [hidden, all_hidden_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True for block in self.blocks: for layer in block: with tf.name_scope(layer.name): layer.build(None) def 
upsample(x, stride, target_len, separate_cls=True, truncate_seq=False): if stride == 1: return x if separate_cls: cls = x[:, :1] x = x[:, 1:] output = tf.repeat(x, repeats=stride, axis=1) if separate_cls: if truncate_seq: output = tf.pad(output, [[0, 0], [0, stride - 1], [0, 0]]) output = output[:, :target_len - 1] output = tf.concat([cls, output], axis=1) else: output = output[:, :target_len] return output class TFFunnelDecoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.separate_cls = config.separate_cls self.truncate_seq = config.truncate_seq self.stride = 2 ** (len(config.block_sizes) - 1) self.attention_structure = TFFunnelAttentionStructure(config) self.layers = [TFFunnelLayer(config, 0, name=f'layers_._{i}') for i in range(config.num_decoder_layers)] def call(self, final_hidden, first_block_hidden, attention_mask=None, token_type_ids=None, output_attentions=False, output_hidden_states=False, return_dict=True, training=False): upsampled_hidden = upsample(final_hidden, stride=self.stride, target_len=shape_list(first_block_hidden)[1], separate_cls=self.separate_cls, truncate_seq=self.truncate_seq) hidden = upsampled_hidden + first_block_hidden all_hidden_states = (hidden,) if output_hidden_states else None all_attentions = () if output_attentions else None attention_inputs = self.attention_structure.init_attention_inputs(hidden, attention_mask=attention_mask, token_type_ids=token_type_ids, training=training) for layer in self.layers: layer_output = layer(hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions, training=training) hidden = layer_output[0] if output_attentions: all_attentions = all_attentions + layer_output[1:] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden,) if not return_dict: return tuple((v for v in [hidden, all_hidden_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFFunnelBaseLayer(keras.layers.Layer): config_class = FunnelConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFFunnelEmbeddings(config, name='embeddings') self.encoder = TFFunnelEncoder(config, name='encoder') def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call(self, input_ids=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is 
None: token_type_ids = tf.fill(input_shape, 0) if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids, training=training) encoder_outputs = self.encoder(inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return encoder_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) @keras_serializable class TFFunnelMainLayer(keras.layers.Layer): config_class = FunnelConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.block_sizes = config.block_sizes self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFFunnelEmbeddings(config, name='embeddings') self.encoder = TFFunnelEncoder(config, name='encoder') self.decoder = TFFunnelDecoder(config, name='decoder') def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call(self, input_ids=None, attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) if inputs_embeds is None: inputs_embeds = self.embeddings(input_ids, training=training) encoder_outputs = self.encoder(inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict, training=training) decoder_outputs = self.decoder(final_hidden=encoder_outputs[0], first_block_hidden=encoder_outputs[1][self.block_sizes[0]], attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) if not return_dict: idx = 0 outputs = (decoder_outputs[0],) if output_hidden_states: idx += 1 outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],) if output_attentions: idx += 1 outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],) return outputs return TFBaseModelOutput(last_hidden_state=decoder_outputs[0], hidden_states=encoder_outputs.hidden_states + decoder_outputs.hidden_states if output_hidden_states else None, attentions=encoder_outputs.attentions + decoder_outputs.attentions if output_attentions else None) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'encoder', 
None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'decoder', None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) class TFFunnelDiscriminatorPredictions(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) initializer = get_initializer(config.initializer_range) self.dense = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name='dense') self.activation_function = get_tf_activation(config.hidden_act) self.dense_prediction = keras.layers.Dense(1, kernel_initializer=initializer, name='dense_prediction') self.config = config def call(self, discriminator_hidden_states): hidden_states = self.dense(discriminator_hidden_states) hidden_states = self.activation_function(hidden_states) logits = tf.squeeze(self.dense_prediction(hidden_states)) return logits def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'dense', None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.d_model]) if getattr(self, 'dense_prediction', None) is not None: with tf.name_scope(self.dense_prediction.name): self.dense_prediction.build([None, None, self.config.d_model]) class TFFunnelMaskedLMHead(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer='zeros', trainable=True, name='bias') super().build(input_shape) def get_output_embeddings(self): return self.input_embeddings def set_output_embeddings(self, value): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self): return {'bias': self.bias} def set_bias(self, value): self.bias = value['bias'] self.config.vocab_size = shape_list(value['bias'])[0] def call(self, hidden_states, training=False): seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states class TFFunnelClassificationHead(keras.layers.Layer): def __init__(self, config, n_labels, **kwargs): super().__init__(**kwargs) initializer = get_initializer(config.initializer_range) self.linear_hidden = keras.layers.Dense(config.d_model, kernel_initializer=initializer, name='linear_hidden') self.dropout = keras.layers.Dropout(config.hidden_dropout) self.linear_out = keras.layers.Dense(n_labels, kernel_initializer=initializer, name='linear_out') self.config = config def call(self, hidden, training=False): hidden = self.linear_hidden(hidden) hidden = keras.activations.tanh(hidden) hidden = self.dropout(hidden, training=training) return self.linear_out(hidden) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'linear_hidden', None) is not None: with tf.name_scope(self.linear_hidden.name): self.linear_hidden.build([None, None, self.config.d_model]) if getattr(self, 'linear_out', None) is not None: with tf.name_scope(self.linear_out.name): self.linear_out.build([None, None, self.config.d_model]) class 
TFFunnelPreTrainedModel(TFPreTrainedModel): config_class = FunnelConfig base_model_prefix = 'funnel' @property def dummy_inputs(self): return {'input_ids': tf.ones((1, 3), dtype=tf.int32)} @dataclass class TFFunnelForPreTrainingOutput(ModelOutput): logits: tf.Tensor = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None FUNNEL_START_DOCSTRING = '\n\n The Funnel Transformer model was proposed in [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient\n Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Parameters:\n config ([`XxxConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' FUNNEL_INPUTS_DOCSTRING = "\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @add_start_docstrings('\n The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called\n decoder) or any task-specific head on top.\n ', FUNNEL_START_DOCSTRING) class TFFunnelBaseModel(TFFunnelPreTrainedModel): def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.funnel = TFFunnelBaseLayer(config, name='funnel') @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='funnel-transformer/small-base', output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[Tuple[tf.Tensor], TFBaseModelOutput]: return self.funnel(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) def serving_output(self, output): return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=output.hidden_states, attentions=output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'funnel', None) is not None: with tf.name_scope(self.funnel.name): self.funnel.build(None) @add_start_docstrings('The bare Funnel 
Transformer Model transformer outputting raw hidden-states without any specific head on top.', FUNNEL_START_DOCSTRING) class TFFunnelModel(TFFunnelPreTrainedModel): def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.funnel = TFFunnelMainLayer(config, name='funnel') @unpack_inputs @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='funnel-transformer/small', output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[Tuple[tf.Tensor], TFBaseModelOutput]: return self.funnel(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) def serving_output(self, output): return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=output.hidden_states, attentions=output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'funnel', None) is not None: with tf.name_scope(self.funnel.name): self.funnel.build(None) @add_start_docstrings('\n Funnel model with a binary classification head on top as used during pretraining for identifying generated tokens.\n ', FUNNEL_START_DOCSTRING) class TFFunnelForPreTraining(TFFunnelPreTrainedModel): def __init__(self, config: FunnelConfig, **kwargs) -> None: super().__init__(config, **kwargs) self.funnel = TFFunnelMainLayer(config, name='funnel') self.discriminator_predictions = TFFunnelDiscriminatorPredictions(config, name='discriminator_predictions') @unpack_inputs @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TFFunnelForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False, **kwargs) -> Union[Tuple[tf.Tensor], TFFunnelForPreTrainingOutput]: discriminator_hidden_states = self.funnel(input_ids, attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training) discriminator_sequence_output = discriminator_hidden_states[0] logits = self.discriminator_predictions(discriminator_sequence_output) if not return_dict: return (logits,) + discriminator_hidden_states[1:] return TFFunnelForPreTrainingOutput(logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions) def serving_output(self, output): return TFFunnelForPreTrainingOutput(logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'funnel', None) is not None: with 
tf.name_scope(self.funnel.name): self.funnel.build(None) if getattr(self, 'discriminator_predictions', None) is not None: with tf.name_scope(self.discriminator_predictions.name): self.discriminator_predictions.build(None) @add_start_docstrings('Funnel Model with a `language modeling` head on top.', FUNNEL_START_DOCSTRING) class TFFunnelForMaskedLM(TFFunnelPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.funnel = TFFunnelMainLayer(config, name='funnel') self.lm_head = TFFunnelMaskedLMHead(config, self.funnel.embeddings, name='lm_head') def get_lm_head(self) -> TFFunnelMaskedLMHead: return self.lm_head def get_prefix_bias_name(self) -> str: warnings.warn('The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.', FutureWarning) return self.name + '/' + self.lm_head.name @unpack_inputs @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='funnel-transformer/small', output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: bool=False) -> Union[Tuple[tf.Tensor], TFMaskedLMOutput]: outputs = self.funnel(input_ids, attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[1:] return (loss,) + output if loss is not None else output return TFMaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput: return TFMaskedLMOutput(logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'funnel', None) is not None: with tf.name_scope(self.funnel.name): self.funnel.build(None) if getattr(self, 'lm_head', None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) @add_start_docstrings('\n Funnel Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. 
for GLUE tasks.\n ', FUNNEL_START_DOCSTRING) class TFFunnelForSequenceClassification(TFFunnelPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.funnel = TFFunnelBaseLayer(config, name='funnel') self.classifier = TFFunnelClassificationHead(config, config.num_labels, name='classifier') @unpack_inputs @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='funnel-transformer/small-base', output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: bool=False) -> Union[Tuple[tf.Tensor], TFSequenceClassifierOutput]: outputs = self.funnel(input_ids, attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training) last_hidden_state = outputs[0] pooled_output = last_hidden_state[:, 0] logits = self.classifier(pooled_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput: return TFSequenceClassifierOutput(logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'funnel', None) is not None: with tf.name_scope(self.funnel.name): self.funnel.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings('\n Funnel Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', FUNNEL_START_DOCSTRING) class TFFunnelForMultipleChoice(TFFunnelPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.funnel = TFFunnelBaseLayer(config, name='funnel') self.classifier = TFFunnelClassificationHead(config, 1, name='classifier') @property def dummy_inputs(self): return {'input_ids': tf.ones((3, 3, 4), dtype=tf.int32)} @unpack_inputs @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint='funnel-transformer/small-base', output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: bool=False) -> Union[Tuple[tf.Tensor], TFMultipleChoiceModelOutput]: if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_inputs_embeds = tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None outputs = self.funnel(flat_input_ids, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) last_hidden_state = outputs[0] pooled_output = last_hidden_state[:, 0] logits = self.classifier(pooled_output, training=training) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TFMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput: return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'funnel', None) is not None: with tf.name_scope(self.funnel.name): self.funnel.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings('\n Funnel Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', FUNNEL_START_DOCSTRING) class TFFunnelForTokenClassification(TFFunnelPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.funnel = TFFunnelMainLayer(config, name='funnel') self.dropout = keras.layers.Dropout(config.hidden_dropout) self.classifier = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='classifier') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='funnel-transformer/small', output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: bool=False) -> Union[Tuple[tf.Tensor], TFTokenClassifierOutput]: outputs = self.funnel(input_ids, attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput: return TFTokenClassifierOutput(logits=output.logits, hidden_states=output.hidden_states, attentions=output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'funnel', None) is not None: with tf.name_scope(self.funnel.name): self.funnel.build(None) if getattr(self, 'classifier', None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings('\n Funnel Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', FUNNEL_START_DOCSTRING) class TFFunnelForQuestionAnswering(TFFunnelPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config: FunnelConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.funnel = TFFunnelMainLayer(config, name='funnel') self.qa_outputs = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='funnel-transformer/small', output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | 
None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, start_positions: np.ndarray | tf.Tensor | None=None, end_positions: np.ndarray | tf.Tensor | None=None, training: bool=False) -> Union[Tuple[tf.Tensor], TFQuestionAnsweringModelOutput]: outputs = self.funnel(input_ids, attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {'start_position': start_positions, 'end_position': end_positions} loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return (loss,) + output if loss is not None else output return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput: return TFQuestionAnsweringModelOutput(start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=output.hidden_states, attentions=output.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'funnel', None) is not None: with tf.name_scope(self.funnel.name): self.funnel.build(None) if getattr(self, 'qa_outputs', None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) # File: transformers-main/src/transformers/models/funnel/tokenization_funnel.py """""" import collections import os import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} _model_names = ['small', 'small-base', 'medium', 'medium-base', 'intermediate', 'intermediate-base', 'large', 'large-base', 'xlarge', 'xlarge-base'] def load_vocab(vocab_file): vocab = collections.OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as reader: tokens = reader.readlines() for (index, token) in enumerate(tokens): token = token.rstrip('\n') vocab[token] = index return vocab def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens class FunnelTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES cls_token_type_id: int = 2 def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', bos_token='<s>', eos_token='</s>', tokenize_chinese_chars=True, strip_accents=None, **kwargs): if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = FunnelTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
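# The remainder of __init__ below loads the WordPiece vocabulary, builds the id<->token maps, and wires up a BasicTokenizer (whitespace/punctuation splitting, optional lowercasing and accent stripping) followed by a WordpieceTokenizer for sub-word segmentation.
# Illustrative note (t0, t1, u0 are placeholder ids, not from the source): with Funnel's cls_token_type_id = 2, a sentence pair token_ids_0 = [t0, t1], token_ids_1 = [u0] is assembled by build_inputs_with_special_tokens as [cls, t0, t1, sep, u0, sep], and create_token_type_ids_from_sequences returns [2, 0, 0, 0, 1, 1], i.e. the <cls> position gets its own segment id instead of sharing segment 0 as in BERT.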
self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs) @property def do_lower_case(self): return self.basic_tokenizer.do_lower_case @property def vocab_size(self): return len(self.vocab) def get_vocab(self): return dict(self.vocab, **self.added_tokens_encoder) def _tokenize(self, text, split_special_tokens=False): split_tokens = [] if self.do_basic_tokenize: for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None): if token in self.basic_tokenizer.never_split: split_tokens.append(token) else: split_tokens += self.wordpiece_tokenizer.tokenize(token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) return split_tokens def _convert_token_to_id(self, token): return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): return self.ids_to_tokens.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): out_string = ' '.join(tokens).replace(' ##', '').strip() return out_string def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is not None: return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] return [1] + [0] * len(token_ids_0) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(vocab_file, 'w', encoding='utf-8') as writer: for (token, token_index) in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: 
logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!') index = token_index writer.write(token + '\n') index += 1 return (vocab_file,) class BasicTokenizer: def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) unicode_normalized_text = unicodedata.normalize('NFC', text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return ''.join(output) def _is_chinese_char(self, cp): if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103): return True return False def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 65533 or _is_control(char): continue if _is_whitespace(char): output.append(' ') else: output.append(char) return ''.join(output) class WordpieceTokenizer: def __init__(self, vocab, unk_token, max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = ''.join(chars[start:end]) if start > 0: substr = '##' + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if 
is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens # File: transformers-main/src/transformers/models/funnel/tokenization_funnel_fast.py """""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _model_names = ['small', 'small-base', 'medium', 'medium-base', 'intermediate', 'intermediate-base', 'large', 'large-base', 'xlarge', 'xlarge-base'] class FunnelTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = FunnelTokenizer cls_token_type_id: int = 2 def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', bos_token='<s>', eos_token='</s>', clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix='##', **kwargs): super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs) normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars: normalizer_class = getattr(normalizers, normalizer_state.pop('type')) normalizer_state['lowercase'] = do_lower_case normalizer_state['strip_accents'] = strip_accents normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) self.do_lower_case = do_lower_case def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) # File: transformers-main/src/transformers/models/fuyu/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_fuyu': ['FuyuConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_fuyu'] = ['FuyuImageProcessor'] _import_structure['processing_fuyu'] = ['FuyuProcessor'] try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_fuyu'] = ['FuyuForCausalLM', 'FuyuPreTrainedModel'] if TYPE_CHECKING: from .configuration_fuyu import FuyuConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_fuyu import FuyuImageProcessor from .processing_fuyu import FuyuProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fuyu import FuyuForCausalLM, FuyuPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/fuyu/configuration_fuyu.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class FuyuConfig(PretrainedConfig): model_type = 'fuyu' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=262144, hidden_size=4096, intermediate_size=16384, num_hidden_layers=36, num_attention_heads=64, hidden_act='relu2', max_position_embeddings=16384, image_size=300, patch_size=30, num_channels=3, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, tie_word_embeddings=False, rope_theta=25000.0, rope_scaling=None, qk_layernorm=True, hidden_dropout=0.0, attention_dropout=0.0, partial_rotary_factor=0.5, pad_token_id=None, bos_token_id=1, eos_token_id=2, text_config=None, **kwargs): if text_config is None: text_config = {'vocab_size': vocab_size, 'max_position_embeddings': max_position_embeddings, 'hidden_size': hidden_size, 'intermediate_size': intermediate_size, 'num_hidden_layers': num_hidden_layers, 'num_attention_heads': num_attention_heads, 'hidden_act': hidden_act, 'initializer_range': initializer_range, 'layer_norm_eps': layer_norm_eps, 'use_cache': use_cache, 'rope_theta': rope_theta, 'rope_scaling': rope_scaling, 'qk_layernorm': qk_layernorm, 'hidden_dropout': hidden_dropout, 'attention_dropout': attention_dropout, 'partial_rotary_factor': partial_rotary_factor, 'pad_token_id': pad_token_id, 'bos_token_id': bos_token_id, 'eos_token_id': eos_token_id, 'tie_word_embeddings': tie_word_embeddings} logger.info('text_config is None. 
initializing the text model with default values.') text_model_type = text_config['model_type'] if 'model_type' in text_config else 'persimmon' self.text_config = CONFIG_MAPPING[text_model_type](**text_config) self._vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.qk_layernorm = qk_layernorm self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.partial_rotary_factor = partial_rotary_factor self._rope_scaling_validation() super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) def _rope_scaling_validation(self): if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: raise ValueError(f'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}') rope_scaling_type = self.rope_scaling.get('type', None) rope_scaling_factor = self.rope_scaling.get('factor', None) if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']: raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}") if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") # File: transformers-main/src/transformers/models/fuyu/convert_fuyu_model_weights_to_hf.py import argparse import os import sys import warnings import flatdict import torch from transformers import FuyuConfig, FuyuForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast tokenizer_class = LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn('The converted tokenizer will be the `slow` tokenizer. 
To use the fast, update your `tokenizers` library and re-run the tokenizer conversion') tokenizer_class = LlamaTokenizer '' KEYS_TO_MODIFY_MAPPING = {'self_attention': 'self_attn', 'language_model.encoder': 'language_model.model', 'word_embeddings_for_head': 'language_model.lm_head', 'language_model.embedding.word_embeddings': 'language_model.model.embed_tokens', 'vit_encoder.linear_encoder': 'vision_embed_tokens'} KEYS_TO_REMOVE = {'rotary_emb.inv_freq', 'image_patch_projection', 'image_patch_projection.weight', 'image_patch_projection.bias'} def rename_state_dict(state_dict): model_state_dict = {} for (key, value) in state_dict.items(): for (key_to_modify, new_key) in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) if key in KEYS_TO_REMOVE: continue model_state_dict[key] = value return model_state_dict def convert_fuyu_checkpoint(pytorch_dump_folder_path, ada_lib_path, pt_model_path, safe_serialization=False): sys.path.insert(0, ada_lib_path) model_state_dict_base = torch.load(pt_model_path, map_location='cpu') state_dict = flatdict.FlatDict(model_state_dict_base['model'], '.') state_dict = rename_state_dict(state_dict) transformers_config = FuyuConfig() model = FuyuForCausalLM(transformers_config).to(torch.bfloat16) model.load_state_dict(state_dict) model.save_pretrained(pytorch_dump_folder_path, safe_serialization=safe_serialization) transformers_config.save_pretrained(pytorch_dump_folder_path) def main(): parser = argparse.ArgumentParser() parser.add_argument('--input_dir', help='Location of Fuyu weights, which contains tokenizer.model and model folders') parser.add_argument('--pt_model_path', help='Location of Fuyu `model_optim_rng.pt`') parser.add_argument('--output_dir', help='Location to write HF model and tokenizer') parser.add_argument('--ada_lib_path', help='Location of original source code from adept to deserialize .pt checkpoint') parser.add_argument('--safe_serialization', type=bool, help='Whether or not to save using `safetensors`.') args = parser.parse_args() spm_path = os.path.join(args.input_dir, 'adept_vocab.model') convert_fuyu_checkpoint(pytorch_dump_folder_path=args.output_dir, pt_model_path=args.pt_model_path, safe_serialization=args.safe_serialization, ada_lib_path=args.ada_lib_path) tokenizer = tokenizer_class(spm_path, bos_token='|ENDOFTEXT|', eos_token='|ENDOFTEXT|') tokenizer.save_pretrained(args.output_dir) if __name__ == '__main__': main() # File: transformers-main/src/transformers/models/fuyu/image_processing_fuyu.py """""" import math from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import pad, resize, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, is_valid_image, make_list_of_images, to_numpy_array, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_device, is_torch_dtype, logging, requires_backends if is_torch_available(): import torch logger = logging.get_logger(__name__) def make_list_of_list_of_images(images: Union[List[List[ImageInput]], List[ImageInput], ImageInput]) -> List[List[ImageInput]]: if is_valid_image(images): return [[images]] if isinstance(images, list) and all((isinstance(image, list) for image in images)): return images if isinstance(images, list): return [make_list_of_images(image) 
for image in images] raise ValueError('images must be a list of list of images or a list of images or an image.') class FuyuBatchFeature(BatchFeature): def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]]=None): if tensor_type is None: return self (is_tensor, as_tensor) = self._get_is_as_tensor_fns(tensor_type=tensor_type) def _convert_tensor(elem): if is_tensor(elem): return elem return as_tensor(elem) def _safe_convert_tensor(elem): try: return _convert_tensor(elem) except: if key == 'overflowing_values': raise ValueError('Unable to create tensor returning overflowing values of different lengths. ') raise ValueError("Unable to create tensor, you should probably activate padding with 'padding=True' to have batched tensors with the same length.") for (key, value) in self.items(): if isinstance(value, list) and isinstance(value[0], list): self[key] = [[_safe_convert_tensor(elem) for elem in elems] for elems in value] elif isinstance(value, list): self[key] = [_safe_convert_tensor(elem) for elem in value] else: self[key] = _safe_convert_tensor(value) return self def to(self, *args, **kwargs) -> 'BatchFeature': requires_backends(self, ['torch']) import torch new_data = {} device = kwargs.get('device') if device is None and len(args) > 0: arg = args[0] if is_torch_dtype(arg): pass elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int): device = arg else: raise ValueError(f'Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.') def _to(elem): if torch.is_floating_point(elem): return elem.to(*args, **kwargs) if device is not None: return elem.to(device=device) return elem for (k, v) in self.items(): if isinstance(v, list) and isinstance(v[0], list): new_v = [] for elems in v: new_v.append([_to(elem) for elem in elems]) new_data[k] = new_v elif isinstance(v, list): new_data[k] = [_to(elem) for elem in v] else: new_data[k] = _to(v) self.data = new_data return self class FuyuImageProcessor(BaseImageProcessor): model_input_names = ['images', 'image_input_ids', 'image_patches', 'image_patch_indices_per_batch', 'image_patch_indices_per_subsequence'] def __init__(self, do_resize: bool=True, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_pad: bool=True, padding_value: float=1.0, padding_mode: str='constant', do_normalize: bool=True, image_mean: Union[float, List[float]]=0.5, image_std: Union[float, List[float]]=0.5, do_rescale: bool=True, rescale_factor: float=1 / 255, patch_size: Optional[Dict[str, int]]=None, **kwargs): super().__init__(**kwargs) self.do_resize = do_resize self.size = size if size is not None else {'height': 1080, 'width': 1920} self.resample = resample self.do_pad = do_pad self.padding_value = padding_value self.padding_mode = padding_mode self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.patch_size = patch_size if patch_size is not None else {'height': 30, 'width': 30} def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: (image_height, image_width) = get_image_size(image, input_data_format) (target_height, target_width) = (size['height'], size['width']) if image_width <= target_width and image_height <= target_height: return image height_scale_factor = 
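# A small sketch of the dispatch rule used by FuyuBatchFeature.to(...) above: floating-point
# tensors receive the full .to(...) call (so they may change dtype), while integer tensors such
# as token ids are only moved to the requested device. The tensors below are toy values.
import torch

def _sketch_to(elem, device=None, dtype=None):
    if torch.is_floating_point(elem):
        return elem.to(device=device, dtype=dtype)
    if device is not None:
        return elem.to(device=device)
    return elem

float_patches = torch.zeros(2, 3)       # e.g. image patches
int_ids = torch.tensor([1, 2, 3])       # e.g. image_input_ids
print(_sketch_to(float_patches, dtype=torch.bfloat16).dtype)  # torch.bfloat16
print(_sketch_to(int_ids, dtype=torch.bfloat16).dtype)        # stays torch.int64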
target_height / image_height width_scale_factor = target_width / image_width optimal_scale_factor = min(height_scale_factor, width_scale_factor) new_height = int(image_height * optimal_scale_factor) new_width = int(image_width * optimal_scale_factor) scaled_image = resize(image=image, size=(new_height, new_width), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) return scaled_image def pad_image(self, image: np.ndarray, size: Dict[str, int], mode: str='constant', constant_values: float=1.0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: (image_height, image_width) = get_image_size(image, input_data_format) (target_height, target_width) = (size['height'], size['width']) padding_top = 0 padding_left = 0 padding_bottom = target_height - image_height padding_right = target_width - image_width padded_image = pad(image, padding=((padding_top, padding_bottom), (padding_left, padding_right)), mode=mode, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) return padded_image @filter_out_non_signature_kwargs() def preprocess(self, images, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_pad: Optional[bool]=None, padding_value: Optional[float]=None, padding_mode: Optional[str]=None, do_normalize: Optional[bool]=None, image_mean: Optional[float]=None, image_std: Optional[float]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, patch_size: Optional[Dict[str, int]]=None, data_format: Optional[Union[str, ChannelDimension]]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, return_tensors: Optional[TensorType]=None): do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_pad = do_pad if do_pad is not None else self.do_pad do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std padding_value = padding_value if padding_value is not None else self.padding_value padding_mode = padding_mode if padding_mode is not None else self.padding_mode do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor patch_size = patch_size if patch_size is not None else self.patch_size if isinstance(images, list) and any((isinstance(elem, list) and len(elem) >= 2 for elem in images)): raise ValueError('Multiple images for a single sample are not yet supported.') batch_images = make_list_of_list_of_images(images) validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=size, do_resize=do_resize, size=size, resample=resample) batch_images = [[to_numpy_array(image) for image in images] for images in batch_images] if is_scaled_image(batch_images[0][0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. 
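# Worked example of the aspect-preserving resize above, using the processor's default
# size {'height': 1080, 'width': 1920}: the smaller of the two scale factors is applied to both
# dimensions so the image fits inside the target box. The input dimensions are illustration values.
image_height, image_width = 2160, 2880
target_height, target_width = 1080, 1920
height_scale_factor = target_height / image_height                  # 0.5
width_scale_factor = target_width / image_width                     # ~0.667
optimal_scale_factor = min(height_scale_factor, width_scale_factor)  # 0.5
new_height = int(image_height * optimal_scale_factor)               # 1080
new_width = int(image_width * optimal_scale_factor)                 # 1440
print(new_height, new_width)  # 1080 1440; pad_image then pads bottom/right up to 1080x1920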
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(batch_images[0][0]) original_image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images] if do_resize: batch_images = [[self.resize(image, size=size, input_data_format=input_data_format) for image in images] for images in batch_images] image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images] image_unpadded_heights = [[image_size[0]] for image_size in image_sizes] image_unpadded_widths = [[image_size[1]] for image_size in image_sizes] image_scale_factors = [[resized_size[0] / original_size[0]] for (original_size, resized_size) in zip(original_image_sizes, image_sizes)] if do_pad: batch_images = [[self.pad_image(image, size=size, mode=padding_mode, constant_values=padding_value, input_data_format=input_data_format) for image in images] for images in batch_images] if do_rescale: batch_images = [[self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) for image in images] for images in batch_images] if do_normalize: batch_images = [[self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] for images in batch_images] if data_format is not None: batch_images = [[to_channel_dimension_format(image, data_format, input_data_format) for image in images] for images in batch_images] data = {'images': batch_images, 'image_unpadded_heights': image_unpadded_heights, 'image_unpadded_widths': image_unpadded_widths, 'image_scale_factors': image_scale_factors} return FuyuBatchFeature(data=data, tensor_type=return_tensors) def get_num_patches(self, image_height: int, image_width: int, patch_size: Dict[str, int]=None) -> int: patch_size = patch_size if patch_size is not None else self.patch_size (patch_height, patch_width) = (self.patch_size['height'], self.patch_size['width']) if image_height % patch_height != 0: raise ValueError(f'image_height={image_height!r} must be divisible by {patch_height}') if image_width % patch_width != 0: raise ValueError(f'image_width={image_width!r} must be divisible by {patch_width}') num_patches_per_dim_h = image_height // patch_height num_patches_per_dim_w = image_width // patch_width num_patches = num_patches_per_dim_h * num_patches_per_dim_w return num_patches def patchify_image(self, image: 'torch.Tensor', patch_size: Optional[Dict[str, int]]=None) -> 'torch.Tensor': requires_backends(self, ['torch']) patch_size = patch_size if patch_size is not None else self.patch_size (patch_height, patch_width) = (patch_size['height'], patch_size['width']) (batch_size, channels, _, _) = image.shape unfolded_along_height = image.unfold(2, patch_height, patch_height) patches = unfolded_along_height.unfold(3, patch_width, patch_width) patches = patches.contiguous() patches = patches.view(batch_size, channels, -1, patch_height, patch_width) patches = patches.permute(0, 2, 3, 4, 1) patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width) return patches def preprocess_with_tokenizer_info(self, image_input: 'torch.Tensor', image_present: 'torch.Tensor', image_unpadded_h: 'torch.Tensor', image_unpadded_w: 'torch.Tensor', image_placeholder_id: int, image_newline_id: int, variable_sized: bool, patch_size: Optional[Dict[str, int]]=None) -> FuyuBatchFeature: requires_backends(self, ['torch']) patch_size = patch_size if patch_size 
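# Worked example of get_num_patches above with the default patch_size {'height': 30, 'width': 30}
# and a fully padded 1080x1920 image: both dimensions must be exact multiples of the patch size,
# otherwise a ValueError is raised.
image_height, image_width = 1080, 1920
patch_height, patch_width = 30, 30
assert image_height % patch_height == 0 and image_width % patch_width == 0
num_patches = (image_height // patch_height) * (image_width // patch_width)
print(num_patches)  # 36 * 64 = 2304 patches, each flattened by patchify_image to 3 * 30 * 30 = 2700 values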
is not None else self.patch_size (patch_height, patch_width) = (patch_size['height'], patch_size['width']) images: List[List[torch.Tensor]] = [] batch_image_patches: List[List[torch.Tensor]] = [] batch_image_input_ids: List[List[torch.Tensor]] = [] for batch_index in range(image_input.shape[0]): image_input_ids = [] image_patches = [] for subseq_index in range(image_input.shape[1]): if image_present[batch_index, subseq_index]: image = image_input[batch_index, subseq_index] (image_height, image_width) = (image.shape[1], image.shape[2]) if variable_sized: new_h = min(image_height, math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height) new_w = min(image_width, math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width) image = image[:, :new_h, :new_w] (image_height, image_width) = (new_h, new_w) num_patches = self.get_num_patches(image_height=image_height, image_width=image_width) tensor_of_image_ids = torch.full([num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device) patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0) assert num_patches == patches.shape[0] if variable_sized: tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width) newline_ids = torch.full([tensor_of_image_ids.shape[0], 1], image_newline_id, dtype=torch.int32, device=image_input.device) tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1) tensor_of_image_ids = tensor_of_image_ids.reshape(-1) images.append([image]) image_input_ids.append(tensor_of_image_ids) image_patches.append(patches) else: image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device)) batch_image_input_ids.append(image_input_ids) batch_image_patches.append(image_patches) image_patch_indices_per_batch: List[List[torch.Tensor]] = [] image_patch_indices_per_subsequence: List[List[torch.Tensor]] = [] for sample_image_input_ids in batch_image_input_ids: index_offset = 0 per_batch_indices = [] per_subsequence_indices = [] for subseq_image_input_ids in sample_image_input_ids: patches_mask = subseq_image_input_ids == image_placeholder_id num_patches = torch.count_nonzero(patches_mask) indices = torch.arange(num_patches, dtype=torch.int64, device=subseq_image_input_ids.device).type_as(subseq_image_input_ids) indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1) indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1) patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0] indices_in_stream_per_batch[patches_inds] = indices + index_offset indices_in_stream_per_subsequence[patches_inds] = indices per_batch_indices.append(indices_in_stream_per_batch) per_subsequence_indices.append(indices_in_stream_per_subsequence) index_offset += num_patches image_patch_indices_per_batch.append(per_batch_indices) image_patch_indices_per_subsequence.append(per_subsequence_indices) return FuyuBatchFeature(data={'images': images, 'image_input_ids': batch_image_input_ids, 'image_patches': batch_image_patches, 'image_patch_indices_per_batch': image_patch_indices_per_batch, 'image_patch_indices_per_subsequence': image_patch_indices_per_subsequence}) # File: transformers-main/src/transformers/models/fuyu/modeling_fuyu.py """""" from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...modeling_outputs import CausalLMOutputWithPast from ...modeling_utils import PreTrainedModel from ...models.auto.modeling_auto import 
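# Sketch of the variable_sized branch of preprocess_with_tokenizer_info above: the padded image is
# cropped back to the smallest patch-aligned box containing the unpadded content, and one
# image_newline_id is appended after every row of image_placeholder_id patch tokens. The sizes and
# token ids below are hypothetical illustration values, not the tokenizer's real ids.
import math
import torch

image_height, image_width = 1080, 1920            # padded size
image_unpadded_h, image_unpadded_w = 500, 650      # size before padding
patch_height, patch_width = 30, 30
image_placeholder_id, image_newline_id = 100, 101  # hypothetical ids

new_h = min(image_height, math.ceil(image_unpadded_h / patch_height) * patch_height)  # 510
new_w = min(image_width, math.ceil(image_unpadded_w / patch_width) * patch_width)     # 660
num_patches = (new_h // patch_height) * (new_w // patch_width)                        # 17 * 22 = 374

ids = torch.full([num_patches], image_placeholder_id, dtype=torch.int32)
ids = ids.reshape(-1, new_w // patch_width)                  # one row of tokens per row of patches
newlines = torch.full([ids.shape[0], 1], image_newline_id, dtype=torch.int32)
ids = torch.cat([ids, newlines], dim=1).reshape(-1)          # 17 * (22 + 1) = 391 tokens
print(num_patches, ids.shape[0])  # 374 391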
AutoModelForCausalLM from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_fuyu import FuyuConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'FuyuConfig' FUYU_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`FuyuConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('The bare Fuyu Model outputting raw hidden-states without any specific head on top.', FUYU_START_DOCSTRING) class FuyuPreTrainedModel(PreTrainedModel): config_class = FuyuConfig base_model_prefix = 'fuyu' supports_gradient_checkpointing = True _no_split_modules = [] _skip_keys_device_placement = 'past_key_values' def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() FUYU_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n image_patches (`torch.FloatTensor` of shape `(batch_size, num_total_patches, patch_size_ x patch_size x num_channels)`, *optional*):\n Image patches to be used as continuous embeddings. 
The patches are flattened and then projected to the\n hidden size of the model.\n image_patches_indices (`torch.LongTensor` of shape `(batch_size, num_total_patches + number_of_newline_tokens + number_of_text_tokens, patch_size_ x patch_size x num_channels )`, *optional*):\n Indices indicating at which position the image_patches have to be inserted in input_embeds.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('Fuyu Model with a language modeling head on top for causal language model conditioned on image patches and text.', FUYU_START_DOCSTRING) class FuyuForCausalLM(FuyuPreTrainedModel): def __init__(self, config: FuyuConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.text_config.vocab_size self.language_model = AutoModelForCausalLM.from_config(config.text_config, attn_implementation=config._attn_implementation) self.vision_embed_tokens = nn.Linear(config.patch_size * config.patch_size * config.num_channels, config.hidden_size) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def get_output_embeddings(self): return self.language_model.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def set_decoder(self, decoder): self.language_model.set_decoder(decoder) def get_decoder(self): return self.language_model.get_decoder() def tie_weights(self): return self.language_model.tie_weights() def gather_continuous_embeddings(self, word_embeddings: torch.Tensor, continuous_embeddings: List[torch.Tensor], image_patch_input_indices: torch.Tensor) -> torch.Tensor: if not word_embeddings.shape[0] == len(continuous_embeddings): raise ValueError(f'Batch sizes must match! Got len(continuous_embeddings)={len(continuous_embeddings)!r} and word_embeddings.shape[0]={word_embeddings.shape[0]!r}') output_embeddings = word_embeddings.clone() for batch_idx in range(word_embeddings.shape[0]): dst_indices = torch.nonzero(image_patch_input_indices[batch_idx] >= 0, as_tuple=True)[0] src_indices = image_patch_input_indices[batch_idx][dst_indices] if src_indices.shape[0] > continuous_embeddings[batch_idx].shape[0]: raise ValueError(f'Number of continuous embeddings continuous_embeddings[batch_idx].shape={continuous_embeddings[batch_idx].shape!r} does not match number of continuous token ids src_indices.shape={src_indices.shape!r} in batch element {batch_idx}.') output_embeddings[batch_idx, dst_indices] = continuous_embeddings[batch_idx][src_indices] return output_embeddings @add_start_docstrings_to_model_forward(FUYU_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, image_patches: torch.Tensor=None, image_patches_indices: torch.Tensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else 
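# Toy sketch of gather_continuous_embeddings above: positions whose image_patches_indices entry is
# >= 0 have their word embedding overwritten by the corresponding projected patch embedding, while
# text positions (index -1) keep their original embedding. Shapes are tiny illustration values.
import torch

hidden_size = 4
word_embeddings = torch.zeros(1, 5, hidden_size)                # batch of 1, sequence of 5
continuous_embeddings = [torch.ones(2, hidden_size)]            # 2 patch embeddings for this sample
image_patch_input_indices = torch.tensor([[0, 1, -1, -1, -1]])  # first two positions are patches

output = word_embeddings.clone()
dst = torch.nonzero(image_patch_input_indices[0] >= 0, as_tuple=True)[0]
src = image_patch_input_indices[0][dst]
output[0, dst] = continuous_embeddings[0][src]
print(output[0, :, 0])  # tensor([1., 1., 0., 0., 0.])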
self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: (batch_size, seq_length) = input_ids.shape elif inputs_embeds is not None: (batch_size, seq_length, _) = inputs_embeds.shape else: raise ValueError('You have to specify either input_is or inputs_embeds') seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange(past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) if inputs_embeds is None: inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if image_patches is not None and past_key_values is None: patch_embeddings = [self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype)).squeeze(0).to(inputs_embeds.device) for patch in image_patches] inputs_embeds = self.gather_continuous_embeddings(word_embeddings=inputs_embeds, continuous_embeddings=patch_embeddings, image_patch_input_indices=image_patches_indices) outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, labels=labels, use_cache=use_cache, return_dict=return_dict) return outputs def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, image_patches=None, image_patches_indices=None, **kwargs): if past_key_values: input_ids = input_ids[:, -1:] position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) if inputs_embeds is not None and past_key_values is None: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids} if image_patches_indices is not None: model_inputs['image_patches_indices'] = image_patches_indices model_inputs.update({'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'attention_mask': attention_mask, 'image_patches_indices': image_patches_indices if past_key_values is None else None, 'image_patches': image_patches if past_key_values is None else None}) return model_inputs # File: transformers-main/src/transformers/models/fuyu/processing_fuyu.py """""" import re from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, TruncationStrategy from ...utils import TensorType, is_torch_available, logging, requires_backends if is_torch_available(): from .image_processing_fuyu import FuyuBatchFeature logger = logging.get_logger(__name__) if is_torch_available(): import torch TEXT_REPR_BBOX_OPEN = '' TEXT_REPR_BBOX_CLOSE = '' TEXT_REPR_POINT_OPEN = '' TEXT_REPR_POINT_CLOSE = '' TOKEN_BBOX_OPEN_STRING = '<0x00>' TOKEN_BBOX_CLOSE_STRING = '<0x01>' TOKEN_POINT_OPEN_STRING = '<0x02>' TOKEN_POINT_CLOSE_STRING = '<0x03>' BEGINNING_OF_ANSWER_STRING = 
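# End-to-end usage sketch for the classes above, assuming a Fuyu checkpoint is available on the
# Hub. The "adept/fuyu-8b" repo id, the prompt and the image URL are illustrative assumptions and
# are not taken from this file; any RGB image works. FuyuProcessor combines the image processor
# and tokenizer, and FuyuForCausalLM consumes the resulting input_ids / image_patches /
# image_patches_indices.
import requests
import torch
from PIL import Image
from transformers import FuyuForCausalLM, FuyuProcessor

model_id = "adept/fuyu-8b"  # assumed checkpoint
processor = FuyuProcessor.from_pretrained(model_id)
model = FuyuForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example image
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text="Describe this image:\n", images=image, return_tensors="pt").to(model.device)

generated = model.generate(**inputs, max_new_tokens=20)
print(processor.batch_decode(generated[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True))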
'<0x04>' def full_unpacked_stream_to_tensor(all_bi_tokens_to_place: List[int], full_unpacked_stream: List['torch.Tensor'], fill_value: int, batch_size: int, new_seq_len: int, offset: int) -> 'torch.Tensor': assert len(all_bi_tokens_to_place) == batch_size assert len(full_unpacked_stream) == batch_size new_padded_tensor = torch.full([batch_size, new_seq_len], fill_value=fill_value, dtype=full_unpacked_stream[0].dtype, device=full_unpacked_stream[0].device) for bi in range(batch_size): tokens_to_place = all_bi_tokens_to_place[bi] new_padded_tensor[bi, :tokens_to_place] = full_unpacked_stream[bi][offset:tokens_to_place + offset] return new_padded_tensor def construct_full_unpacked_stream(num_real_text_tokens: Union[List[List[int]], 'torch.Tensor'], input_stream: 'torch.Tensor', image_tokens: List[List['torch.Tensor']], batch_size: int, num_sub_sequences: int) -> List['torch.Tensor']: all_bi_stream = [] for batch_index in range(batch_size): all_si_stream = [] image_adjustment = image_tokens[batch_index][0] subsequence_stream = torch.cat([image_adjustment, input_stream[batch_index, 0]], dim=0) num_real_tokens = image_adjustment.shape[0] + num_real_text_tokens[batch_index][0] all_si_stream.append(subsequence_stream[:num_real_tokens]) all_bi_stream.append(torch.cat(all_si_stream, dim=0)) return all_bi_stream def _replace_string_repr_with_token_tags(prompt: str) -> str: prompt = prompt.replace(TEXT_REPR_POINT_OPEN, TOKEN_POINT_OPEN_STRING) prompt = prompt.replace(TEXT_REPR_POINT_CLOSE, TOKEN_POINT_CLOSE_STRING) prompt = prompt.replace(TEXT_REPR_BBOX_OPEN, TOKEN_BBOX_OPEN_STRING) prompt = prompt.replace(TEXT_REPR_BBOX_CLOSE, TOKEN_BBOX_CLOSE_STRING) return prompt def _segment_prompt_into_text_token_conversions(prompt: str) -> List: prompt_text_list: List = [] regex_pattern = re.compile(f'({TOKEN_BBOX_OPEN_STRING}|{TOKEN_BBOX_CLOSE_STRING}|{TOKEN_POINT_OPEN_STRING}|{TOKEN_POINT_CLOSE_STRING})') prompt_split = regex_pattern.split(prompt) for (i, elem) in enumerate(prompt_split): if len(elem) == 0 or elem in [TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING]: continue prompt_text_list.append((elem, i > 1 and prompt_split[i - 1] in [TOKEN_BBOX_OPEN_STRING, TOKEN_POINT_OPEN_STRING])) return prompt_text_list def _transform_coordinates_and_tokenize(prompt: str, scale_factor: float, tokenizer) -> List[int]: prompt = _replace_string_repr_with_token_tags(prompt) prompt_text_list = _segment_prompt_into_text_token_conversions(prompt) transformed_prompt_tokens: List[int] = [] for elem in prompt_text_list: if elem[1]: within_tag_tokenized = _transform_within_tags(elem[0], scale_factor, tokenizer) transformed_prompt_tokens.extend(within_tag_tokenized) else: transformed_prompt_tokens.extend(tokenizer(elem[0], add_special_tokens=False).input_ids) return transformed_prompt_tokens def _transform_within_tags(text: str, scale_factor: float, tokenizer) -> List[int]: num_int_strs = text.split(',') if len(num_int_strs) == 2: token_space_open_string = tokenizer.vocab[TOKEN_POINT_OPEN_STRING] token_space_close_string = tokenizer.vocab[TOKEN_POINT_CLOSE_STRING] else: token_space_open_string = tokenizer.vocab[TOKEN_BBOX_OPEN_STRING] token_space_close_string = tokenizer.vocab[TOKEN_BBOX_CLOSE_STRING] num_ints = [float(num.strip()) for num in num_int_strs] if len(num_ints) == 2: num_ints_translated = scale_point_to_transformed_image(x=num_ints[0], y=num_ints[1], scale_factor=scale_factor) elif len(num_ints) == 4: num_ints_translated = 
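# Worked example of the coordinate handling used by _transform_within_tags above: point and box
# coordinates written in the prompt are halved and then multiplied by the image's resize scale
# factor before being looked up as tokens (post_process_box_coordinates later inverts this by
# multiplying by 2). The numbers below are illustration values.
import numpy as np

def _to_transformed(coord, scale):
    return int(np.round(np.array([coord / 2]) * scale).astype(np.int32)[0])

x, y, scale_factor = 200.0, 300.0, 0.5
print(_to_transformed(x, scale_factor), _to_transformed(y, scale_factor))  # 50 75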
scale_bbox_to_transformed_image(top=num_ints[0], left=num_ints[1], bottom=num_ints[2], right=num_ints[3], scale_factor=scale_factor) else: raise ValueError(f'Invalid number of ints: {len(num_ints)}') tokens = [tokenizer.vocab[str(num)] for num in num_ints_translated] return [token_space_open_string] + tokens + [token_space_close_string] def _tokenize_prompts_with_image_and_batch(tokenizer, prompts: List[List[str]], scale_factors: Optional[List[List['torch.Tensor']]], max_tokens_to_generate: int, max_position_embeddings: int, add_BOS: bool, add_beginning_of_answer_token: bool) -> Tuple['torch.Tensor', 'torch.Tensor']: if scale_factors is not None: transformed_prompt_tokens = [] for (prompt_seq, scale_factor_seq) in zip(prompts, scale_factors): transformed_prompt_tokens.append([_transform_coordinates_and_tokenize(prompt, scale_factor.item(), tokenizer) for (prompt, scale_factor) in zip(prompt_seq, scale_factor_seq)]) else: transformed_prompt_tokens = [[tokenizer.tokenize(prompt) for prompt in prompt_seq] for prompt_seq in prompts] prompts_tokens = transformed_prompt_tokens if add_BOS: bos_token = tokenizer.vocab[''] else: bos_token = tokenizer.vocab['|ENDOFTEXT|'] prompts_tokens = [[[bos_token] + x for x in prompt_seq] for prompt_seq in prompts_tokens] if add_beginning_of_answer_token: boa = tokenizer.vocab[BEGINNING_OF_ANSWER_STRING] for token_seq in prompts_tokens: token_seq[-1].append(boa) prompts_length = [[len(x) for x in prompts_tokens_seq] for prompts_tokens_seq in prompts_tokens] max_prompt_len: int = np.max(prompts_length) samples_length = min(max_prompt_len + max_tokens_to_generate, max_position_embeddings) if max_prompt_len + max_tokens_to_generate > max_position_embeddings: logger.warning(f'Max subsequence prompt length of {max_prompt_len} + max tokens to generate {max_tokens_to_generate}', f'exceeds context length of {max_position_embeddings}. 
Will generate as many tokens as possible.') for (prompt_tokens_seq, prompts_length_seq) in zip(prompts_tokens, prompts_length): for (prompt_tokens, prompt_length) in zip(prompt_tokens_seq, prompts_length_seq): if len(prompt_tokens) > samples_length: raise ValueError('Length of subsequence prompt exceeds sequence length.') padding_size = samples_length - prompt_length prompt_tokens.extend([tokenizer.vocab['|ENDOFTEXT|']] * padding_size) prompts_tokens_tensor = torch.tensor(prompts_tokens, dtype=torch.int64) prompts_length_tensor = torch.tensor(prompts_length, dtype=torch.int64) return (prompts_tokens_tensor, prompts_length_tensor) def original_to_transformed_h_coords(original_coords, scale_h): return np.round(original_coords * scale_h).astype(np.int32) def original_to_transformed_w_coords(original_coords, scale_w): return np.round(original_coords * scale_w).astype(np.int32) def scale_point_to_transformed_image(x: float, y: float, scale_factor: float) -> List[int]: x_scaled = original_to_transformed_w_coords(np.array([x / 2]), scale_factor)[0] y_scaled = original_to_transformed_h_coords(np.array([y / 2]), scale_factor)[0] return [x_scaled, y_scaled] def scale_bbox_to_transformed_image(top: float, left: float, bottom: float, right: float, scale_factor: float) -> List[int]: top_scaled = original_to_transformed_w_coords(np.array([top / 2]), scale_factor)[0] left_scaled = original_to_transformed_h_coords(np.array([left / 2]), scale_factor)[0] bottom_scaled = original_to_transformed_w_coords(np.array([bottom / 2]), scale_factor)[0] right_scaled = original_to_transformed_h_coords(np.array([right / 2]), scale_factor)[0] return [top_scaled, left_scaled, bottom_scaled, right_scaled] class FuyuProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] valid_kwargs = [] image_processor_class = 'FuyuImageProcessor' tokenizer_class = 'AutoTokenizer' def __init__(self, image_processor, tokenizer, **kwargs): super().__init__(image_processor=image_processor, tokenizer=tokenizer) self.image_processor = image_processor self.tokenizer = tokenizer self.max_tokens_to_generate = 10 self.max_position_embeddings = 16384 self.pad_token_id = 0 self.dummy_image_index = -1 def _left_pad_inputs_with_attention_mask(self, model_inputs: List[Dict], return_attention_mask: bool): max_length_input_ids = max((entry['input_ids'].shape[1] for entry in model_inputs)) max_length_image_patch_indices = max((entry['image_patches_indices'].shape[1] for entry in model_inputs)) batched_inputs = {'input_ids': [], 'image_patches': [], 'image_patches_indices': [], 'attention_mask': []} for entry in model_inputs: for (key, tensor) in entry.items(): if key == 'input_ids': num_padding_tokens = max_length_input_ids - tensor.shape[1] padded_input_ids = torch.cat([torch.full((tensor.shape[0], num_padding_tokens), self.pad_token_id, dtype=torch.long), tensor], dim=1) batched_inputs[key].append(padded_input_ids) attention_mask = torch.cat([torch.zeros(tensor.shape[0], num_padding_tokens, dtype=torch.long), torch.ones_like(tensor)], dim=1) batched_inputs['attention_mask'].append(attention_mask) elif key == 'image_patches': batched_inputs[key].append(tensor) else: num_padding_indices = max_length_image_patch_indices - tensor.shape[1] padded_indices = torch.cat([torch.full((tensor.shape[0], num_padding_indices), self.dummy_image_index, dtype=torch.long), tensor], dim=1) batched_inputs[key].append(padded_indices) batched_keys = ['input_ids', 'image_patches_indices'] if return_attention_mask: batched_keys.append('attention_mask') for key 
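# Small sketch of the left padding performed by _left_pad_inputs_with_attention_mask above:
# shorter input_ids rows are prepended with the pad token id, and the attention mask marks the
# padded positions with 0. The ids and lengths are toy values.
import torch

pad_token_id = 0
ids_a = torch.tensor([[5, 6, 7]])
ids_b = torch.tensor([[8, 9]])
max_len = max(ids_a.shape[1], ids_b.shape[1])

def _left_pad(ids):
    n_pad = max_len - ids.shape[1]
    padded = torch.cat([torch.full((ids.shape[0], n_pad), pad_token_id, dtype=torch.long), ids], dim=1)
    mask = torch.cat([torch.zeros(ids.shape[0], n_pad, dtype=torch.long), torch.ones_like(ids)], dim=1)
    return padded, mask

print(_left_pad(ids_b))  # (tensor([[0, 8, 9]]), tensor([[0, 1, 1]]))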
in batched_keys: batched_inputs[key] = torch.cat(batched_inputs[key], dim=0) return batched_inputs def get_sample_encoding(self, prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, image_placeholder_id, image_newline_id, tensor_batch_images): image_present = torch.ones(1, 1, 1) model_image_input = self.image_processor.preprocess_with_tokenizer_info(image_input=tensor_batch_images, image_present=image_present, image_unpadded_h=image_unpadded_heights, image_unpadded_w=image_unpadded_widths, image_placeholder_id=image_placeholder_id, image_newline_id=image_newline_id, variable_sized=True) (prompt_tokens, prompts_length) = _tokenize_prompts_with_image_and_batch(tokenizer=self.tokenizer, prompts=prompts, scale_factors=scale_factors, max_tokens_to_generate=self.max_tokens_to_generate, max_position_embeddings=self.max_position_embeddings, add_BOS=True, add_beginning_of_answer_token=True) image_padded_unpacked_tokens = construct_full_unpacked_stream(num_real_text_tokens=prompts_length, input_stream=prompt_tokens, image_tokens=model_image_input['image_input_ids'], batch_size=1, num_sub_sequences=self.subsequence_length) unpacked_image_patch_indices_per_batch = construct_full_unpacked_stream(num_real_text_tokens=prompts_length, input_stream=torch.full_like(prompt_tokens, -1), image_tokens=model_image_input['image_patch_indices_per_batch'], batch_size=1, num_sub_sequences=self.subsequence_length) max_prompt_length = max((x.shape[-1] for x in image_padded_unpacked_tokens)) max_seq_len_batch = min(max_prompt_length + self.max_tokens_to_generate, self.max_position_embeddings) tokens_to_place = min(max_seq_len_batch, max(0, image_padded_unpacked_tokens[0].shape[0])) image_patch_input_indices = full_unpacked_stream_to_tensor(all_bi_tokens_to_place=[tokens_to_place], full_unpacked_stream=unpacked_image_patch_indices_per_batch, fill_value=-1, batch_size=1, new_seq_len=max_seq_len_batch, offset=0) image_patches_tensor = torch.stack([img[0] for img in model_image_input['image_patches']]) batch_encoding = {'input_ids': image_padded_unpacked_tokens[0].unsqueeze(0), 'image_patches': image_patches_tensor, 'image_patches_indices': image_patch_input_indices} return batch_encoding def __call__(self, text=None, images=None, add_special_tokens: bool=True, return_attention_mask: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_token_type_ids: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> 'FuyuBatchFeature': requires_backends(self, ['torch']) if not return_attention_mask: raise ValueError('`return_attention_mask=False` is not supported for this model.') if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be None.') if text is not None and images is None: logger.warning('You are processing a text with no associated image. 
Make sure it is intended.') self.current_processor = self.tokenizer text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs) return text_encoding if text is None and images is not None: logger.warning('You are processing an image with no associated text. Make sure it is intended.') prompts = [['']] if text is not None and images is not None: if isinstance(text, str): prompts = [[text]] elif isinstance(text, list): prompts = [[text_seq] for text_seq in text] image_encoding = self.image_processor.preprocess(images, return_tensors='pt') batch_images = image_encoding['images'] image_unpadded_heights = image_encoding['image_unpadded_heights'] image_unpadded_widths = image_encoding['image_unpadded_widths'] scale_factors = image_encoding['image_scale_factors'] self.subsequence_length = 1 self.batch_size = len(batch_images) image_placeholder_id = self.tokenizer('|SPEAKER|', add_special_tokens=False)['input_ids'][1] image_newline_id = self.tokenizer('|NEWLINE|', add_special_tokens=False)['input_ids'][1] tensor_batch_images = torch.stack([img[0] for img in batch_images]).unsqueeze(1) all_encodings = [] for (prompt, scale_factor, image_unpadded_height, image_unpadded_width, tensor_batch_image) in zip(prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, tensor_batch_images): sample_encoding = self.get_sample_encoding(prompts=[prompt], scale_factors=[scale_factor], image_unpadded_heights=torch.tensor([image_unpadded_height]), image_unpadded_widths=torch.tensor([image_unpadded_width]), image_placeholder_id=image_placeholder_id, image_newline_id=image_newline_id, tensor_batch_images=tensor_batch_image.unsqueeze(0)) all_encodings.append(sample_encoding) batch_encoding = self._left_pad_inputs_with_attention_mask(model_inputs=all_encodings, return_attention_mask=return_attention_mask) return FuyuBatchFeature(data=batch_encoding) def post_process_box_coordinates(self, outputs, target_sizes=None): def scale_factor_to_fit(original_size, target_size=None): (height, width) = original_size if target_size is None: max_height = self.image_processor.size['height'] max_width = self.image_processor.size['width'] else: (max_height, max_width) = target_size if width <= max_width and height <= max_height: return 1.0 return min(max_height / height, max_width / width) def find_delimiters_pair(tokens, start_token, end_token): start_id = self.tokenizer.convert_tokens_to_ids(start_token) end_id = self.tokenizer.convert_tokens_to_ids(end_token) starting_positions = (tokens == start_id).nonzero(as_tuple=True)[0] ending_positions = (tokens == end_id).nonzero(as_tuple=True)[0] if torch.any(starting_positions) and torch.any(ending_positions): return (starting_positions[0], ending_positions[0]) return (None, None) def tokens_to_boxes(tokens, original_size): while (pair := find_delimiters_pair(tokens, TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING)) != (None, None): (start, end) = pair if end != start + 5: continue coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1:end]) scale = scale_factor_to_fit(original_size) (top, left, bottom, 
right) = [2 * int(float(c) / scale) for c in coords] replacement = f' {TEXT_REPR_BBOX_OPEN}{top}, {left}, {bottom}, {right}{TEXT_REPR_BBOX_CLOSE}' replacement = self.tokenizer.tokenize(replacement)[1:] replacement = self.tokenizer.convert_tokens_to_ids(replacement) replacement = torch.tensor(replacement).to(tokens) tokens = torch.cat([tokens[:start], replacement, tokens[end + 1:]], 0) return tokens def tokens_to_points(tokens, original_size): while (pair := find_delimiters_pair(tokens, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING)) != (None, None): (start, end) = pair if end != start + 3: continue coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1:end]) scale = scale_factor_to_fit(original_size) (x, y) = [2 * int(float(c) / scale) for c in coords] replacement = f' {TEXT_REPR_POINT_OPEN}{x}, {y}{TEXT_REPR_POINT_CLOSE}' replacement = self.tokenizer.tokenize(replacement)[1:] replacement = self.tokenizer.convert_tokens_to_ids(replacement) replacement = torch.tensor(replacement).to(tokens) tokens = torch.cat([tokens[:start], replacement, tokens[end + 1:]], 0) return tokens if target_sizes is None: target_sizes = ((self.image_processor.size['height'], self.image_processor.size['width']),) * len(outputs) elif target_sizes.shape[1] != 2: raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch') if len(outputs) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as output sequences') results = [] for (seq, size) in zip(outputs, target_sizes): seq = tokens_to_boxes(seq, size) seq = tokens_to_points(seq, size) results.append(seq) return results def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) # File: transformers-main/src/transformers/models/gemma/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_gemma': ['GemmaConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_gemma'] = ['GemmaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_gemma_fast'] = ['GemmaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_gemma'] = ['GemmaForCausalLM', 'GemmaModel', 'GemmaPreTrainedModel', 'GemmaForSequenceClassification', 'GemmaForTokenClassification'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_gemma'] = ['FlaxGemmaForCausalLM', 'FlaxGemmaModel', 'FlaxGemmaPreTrainedModel'] if TYPE_CHECKING: from .configuration_gemma import GemmaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gemma import GemmaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gemma_fast import GemmaTokenizerFast try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gemma import GemmaForCausalLM, GemmaForSequenceClassification, GemmaForTokenClassification, GemmaModel, GemmaPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gemma import FlaxGemmaForCausalLM, FlaxGemmaModel, FlaxGemmaPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/gemma/configuration_gemma.py from transformers import PretrainedConfig class GemmaConfig(PretrainedConfig): model_type = 'gemma' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=256000, hidden_size=3072, intermediate_size=24576, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act='gelu_pytorch_tanh', hidden_activation=None, max_position_embeddings=8192, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.head_dim = head_dim self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.hidden_activation = hidden_activation self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_bias = attention_bias self.attention_dropout = attention_dropout super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) # File: transformers-main/src/transformers/models/gemma/convert_gemma_weights_to_hf.py import argparse import os import warnings import torch from accelerate import init_empty_weights from transformers import GemmaConfig, GemmaForCausalLM, GemmaTokenizer try: from transformers import GemmaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn('The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion') GemmaTokenizerFast = None '' gemma_2b_config = GemmaConfig(num_hidden_layers=18, num_attention_heads=8, num_key_value_heads=1, hidden_size=2048, intermediate_size=16384) gemma_7b_config = GemmaConfig() CONFIG_MAPPING = {'2B': gemma_2b_config, '7B': gemma_7b_config} LAYER_NAME_MAPPING = {'embedder.weight': 'model.embed_tokens.weight'} def write_model(save_path, input_base_path, config, safe_serialization=True, push_to_hub=False, dtype=torch.float32): num_attn_heads = config.num_attention_heads hidden_size = config.hidden_size num_kv_heads = config.num_key_value_heads head_dim = config.head_dim print(f"Fetching all parameters from the checkpoint at '{input_base_path}'") model_state_dict = torch.load(input_base_path, map_location='cpu')['model_state_dict'] model_state_dict.pop('freqs_cis') state_dict = {} for (k, v) in model_state_dict.items(): if 'qkv_proj' in k: if num_kv_heads == 1: v = v.reshape(num_attn_heads + num_kv_heads * 2, head_dim, hidden_size) q_proj = v[:num_attn_heads, ...] 
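# Shape sketch of the fused qkv_proj split above for the multi-query 2B config
# (num_attention_heads=8, num_key_value_heads=1, head_dim=256, hidden_size=2048): the fused weight
# is reshaped to (num_heads + 2 * num_kv_heads, head_dim, hidden_size) and then sliced into q, k
# and v. Random weights are used purely to check the resulting shapes.
import torch

num_attn_heads, num_kv_heads, head_dim, hidden_size = 8, 1, 256, 2048
fused = torch.randn((num_attn_heads + num_kv_heads * 2) * head_dim, hidden_size)

v = fused.reshape(num_attn_heads + num_kv_heads * 2, head_dim, hidden_size)
q_proj = v[:num_attn_heads, ...].reshape(num_attn_heads * head_dim, hidden_size)
k_proj = v[num_attn_heads:num_attn_heads + num_kv_heads, ...].reshape(num_kv_heads * head_dim, hidden_size)
v_proj = v[-num_kv_heads:, ...][0]

print(q_proj.shape, k_proj.shape, v_proj.shape)  # (2048, 2048) (256, 2048) (256, 2048)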
k_proj = v[num_attn_heads:num_attn_heads + num_kv_heads, ...].repeat(num_kv_heads, 1, 1) v_proj = v[-num_kv_heads:, ...].repeat(num_kv_heads, 1, 1) state_dict[k.replace('qkv_proj', 'q_proj')] = q_proj.reshape(num_attn_heads * head_dim, hidden_size).clone() state_dict[k.replace('qkv_proj', 'k_proj')] = k_proj.reshape(num_kv_heads * head_dim, hidden_size).clone() state_dict[k.replace('qkv_proj', 'v_proj')] = v_proj[0].clone() else: (q_proj, k_proj, v_proj) = torch.split(v, v.shape[0] // 3, 0) state_dict[k.replace('qkv_proj', 'q_proj')] = q_proj.reshape(num_attn_heads * head_dim, hidden_size).clone() state_dict[k.replace('qkv_proj', 'k_proj')] = k_proj.reshape(num_kv_heads * head_dim, hidden_size).clone() state_dict[k.replace('qkv_proj', 'v_proj')] = v_proj.clone() elif k == 'embedder.weight': state_dict[LAYER_NAME_MAPPING[k]] = v state_dict['lm_head.weight'] = v else: state_dict[k] = v torch.set_default_dtype(dtype) print('Loading the checkpoint in a Gemma model.') with init_empty_weights(): model = GemmaForCausalLM(config) model.load_state_dict(state_dict, assign=True, strict=False) model.config.torch_dtype = torch.float32 del model.config._name_or_path print('Saving in the Transformers format.') if push_to_hub: print(f'pushing the model to {save_path}') model.push_to_hub(save_path, safe_serialization=safe_serialization, private=True) else: model.save_pretrained(save_path, safe_serialization=safe_serialization) def write_tokenizer(input_tokenizer_path, save_path, push_to_hub=False): tokenizer_class = GemmaTokenizer if GemmaTokenizerFast is None else GemmaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {save_path}.') tokenizer = tokenizer_class(input_tokenizer_path) if push_to_hub: tokenizer.push_to_hub(save_path) else: tokenizer.save_pretrained(save_path) def main(): parser = argparse.ArgumentParser() parser.add_argument('--input_checkpoint', help='Absolute path to the target Gemma weights.', required=True) parser.add_argument('--tokenizer_checkpoint', help='Location of Gemma tokenizer model') parser.add_argument('--model_size', default='7B', choices=['2B', '7B', 'tokenizer_only'], help="'f' models correspond to the finetuned versions, and are specific to the Gemma2 official release. 
For more details on Gemma2, checkout the original repo: https://huggingface.co/google/gemma-7b") parser.add_argument('--output_dir', default='google/gemma-7b', help='Location to write HF model and tokenizer') parser.add_argument('--pickle_serialization', help='Whether or not to save using `safetensors`.', action='store_true', default=False) parser.add_argument('--convert_tokenizer', help='Whether or not to convert the tokenizer as well.', action='store_true', default=False) parser.add_argument('--push_to_hub', help='Whether or not to push the model to the hub at `output_dir` instead of saving it locally.', action='store_true', default=False) parser.add_argument('--dtype', default='float32', help='Target dtype of the converted model') args = parser.parse_args() if args.convert_tokenizer: if args.tokenizer_checkpoint is None: raise ValueError('Path to the tokenizer is required when passing --convert_tokenizer') spm_path = os.path.join(args.tokenizer_checkpoint) write_tokenizer(spm_path, args.output_dir, args.push_to_hub) config = CONFIG_MAPPING[args.model_size] dtype = getattr(torch, args.dtype) write_model(config=config, input_base_path=args.input_checkpoint, save_path=args.output_dir, safe_serialization=not args.pickle_serialization, push_to_hub=args.push_to_hub, dtype=dtype) if __name__ == '__main__': main() # File: transformers-main/src/transformers/models/gemma/diff_gemma.py import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from transformers import PretrainedConfig from transformers.models.llama.modeling_llama import LlamaFlashAttention2, LlamaForCausalLM, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, apply_rotary_pos_emb, repeat_kv from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...modeling_flash_attention_utils import _flash_attention_forward from ...modeling_outputs import CausalLMOutputWithPast from ...pytorch_utils import ALL_LAYERNORM_LAYERS from ...utils import logging logger = logging.get_logger(__name__) class GemmaConfig(PretrainedConfig): model_type = 'gemma' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=256000, hidden_size=3072, intermediate_size=24576, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act='gelu_pytorch_tanh', hidden_activation=None, max_position_embeddings=8192, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.head_dim = head_dim self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.hidden_activation = hidden_activation self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_bias = attention_bias self.attention_dropout = attention_dropout super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) class GemmaRMSNorm(nn.Module): def __init__(self, dim: int, eps: float=1e-06): 
super().__init__() self.eps = eps self.weight = nn.Parameter(torch.zeros(dim)) def _norm(self, x): return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x): output = self._norm(x.float()) output = output * (1.0 + self.weight.float()) return output.type_as(x) def extra_repr(self): return f'{tuple(self.weight.shape)}, eps={self.eps}' ALL_LAYERNORM_LAYERS.append(GemmaRMSNorm) class GemmaRotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim) self.register_buffer('inv_freq', tensor=inv_freq, persistent=False) @torch.no_grad() def forward(self, x, position_ids, seq_len=None): self.inv_freq.to(x.device) inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)) class GemmaMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) if config.hidden_activation is None: logger.warning_once("`config.hidden_act` is ignored, you should use `config.hidden_activation` instead.\nGemma's activation function will be set to `gelu_pytorch_tanh`. Please, use\n`config.hidden_activation` if you want to override this behaviour.\nSee https://github.com/huggingface/transformers/pull/29402 for more details.") config.hidden_activation = 'gelu_pytorch_tanh' hidden_activation = config.hidden_activation self.act_fn = ACT2FN[hidden_activation] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) class GemmaAttention(nn.Module): def __init__(self, config: GemmaConfig, layer_idx: Optional[int]=None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. 
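# Minimal numerical sketch of the GemmaRMSNorm defined above: unlike a plain RMSNorm, the learned
# weight is applied as (1.0 + weight), so the zero-initialised weight used here leaves the
# normalised activations unscaled at initialisation. The input tensor is a toy value.
import torch

def _sketch_rms_norm(x, weight, eps=1e-6):
    normed = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
    return normed * (1.0 + weight)

x = torch.tensor([[3.0, 4.0]])
weight = torch.zeros(2)             # matches nn.Parameter(torch.zeros(dim)) above
print(_sketch_rms_norm(x, weight))  # approx tensor([[0.8485, 1.1314]])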
Please make sure to provide a `layer_idx` when creating this class.') self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self.scaling = 1 / math.sqrt(config.head_dim) if self.hidden_size % self.num_heads != 0: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias) self.rotary_emb = GemmaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class GemmaFlashAttention2(LlamaFlashAttention2): def forward(self, 
hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if isinstance(past_key_value, StaticCache): raise ValueError('`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers') output_attentions = False (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. 
We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, sliding_window=getattr(self, 'sliding_window', None), is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class GemmaModel(LlamaModel): def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`.') use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) return_legacy_cache = False if use_cache and (not isinstance(past_key_values, Cache)) and (not self.training): return_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) hidden_states = inputs_embeds normalizer = torch.tensor(self.config.hidden_size ** 0.5, dtype=hidden_states.dtype) hidden_states = hidden_states * normalizer return super().forward(causal_mask, position_ids, past_key_values, use_cache, output_attentions, output_hidden_states, return_dict, cache_position, input_ids=None, inputs_embeds=hidden_states) class GemmaForCausalLM(LlamaForCausalLM): def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] logits = self.lm_head(hidden_states) logits = logits.float() loss = None if labels is not None: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class GemmaForSequenceClassification(LlamaForSequenceClassification): pass class GemmaForTokenClassification(LlamaForTokenClassification): pass # File: transformers-main/src/transformers/models/gemma/modeling_flax_gemma.py """""" from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights 
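# --- Illustrative aside (added for exposition; not part of modeling_flax_gemma.py) ---
# Two Gemma-specific details from diff_gemma.py above, sketched minimally:
# 1) GemmaModel scales the token embeddings by sqrt(hidden_size) before the decoder
#    stack (`hidden_states = hidden_states * normalizer`).
# 2) GemmaForCausalLM computes the usual next-token loss by shifting logits left and
#    labels right by one position. A self-contained sketch with placeholder shapes
#    (hypothetical tensors, assuming torch is importable):
#
#       import torch
#       from torch.nn import CrossEntropyLoss
#
#       vocab_size = 256000
#       logits = torch.randn(2, 5, vocab_size)           # (batch, seq_len, vocab)
#       labels = torch.randint(0, vocab_size, (2, 5))    # (batch, seq_len)
#       shift_logits = logits[..., :-1, :].contiguous().view(-1, vocab_size)
#       shift_labels = labels[..., 1:].contiguous().view(-1)
#       loss = CrossEntropyLoss()(shift_logits, shift_labels)
# -------------------------------------------------------------------------------------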
from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_gemma import GemmaConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'GemmaConfig' _CHECKPOINT_FOR_DOC = 'google/gemma-2b' _REAL_CHECKPOINT_FOR_DOC = 'openlm-research/open_llama_3b_v2' GEMMA_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a Flax Linen\n [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a\n regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`GemmaConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16`, or\n `jax.numpy.bfloat16`.\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' GEMMA_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):\n Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast\n auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' def create_sinusoidal_positions(num_pos, dim): inv_freq = 1.0 / 10000 ** (np.arange(0, dim, 2)[:dim // 2] / dim) freqs = np.einsum('i , j -> i j', np.arange(num_pos), inv_freq).astype('float32') emb = np.concatenate((freqs, freqs), axis=-1) out = np.concatenate((np.sin(emb)[:, None, :], np.cos(emb)[:, None, :]), axis=-1) return jnp.array(out[:, :, :num_pos]) def rotate_half(tensor): rotate_half_tensor = jnp.concatenate((-tensor[..., tensor.shape[-1] // 2:], tensor[..., :tensor.shape[-1] // 2]), axis=-1) return rotate_half_tensor def apply_rotary_pos_emb(tensor, sin_pos, cos_pos): return tensor * cos_pos + rotate_half(tensor) * sin_pos class FlaxGemmaRMSNorm(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.epsilon = self.config.rms_norm_eps self.weight = self.param('weight', lambda _, shape: jnp.ones(shape), self.config.hidden_size) def __call__(self, hidden_states): variance = jnp.asarray(hidden_states, dtype=jnp.float32) variance = jnp.power(variance, 2) variance = variance.mean(-1, keepdims=True) hidden_states = hidden_states / jnp.sqrt(variance + self.epsilon) return (1 + self.weight) * jnp.asarray(hidden_states, dtype=self.dtype) class FlaxGemmaRotaryEmbedding(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): head_dim = self.config.head_dim self.sincos = create_sinusoidal_positions(self.config.max_position_embeddings, head_dim) def __call__(self, key, query, position_ids): sincos = self.sincos[position_ids] (sin_pos, cos_pos) = jnp.split(sincos, 2, axis=-1) key = apply_rotary_pos_emb(key, sin_pos, cos_pos) query = apply_rotary_pos_emb(query, sin_pos, cos_pos) key = jnp.asarray(key, dtype=self.dtype) query = jnp.asarray(query, dtype=self.dtype) return (key, query) class FlaxGemmaAttention(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 causal: bool = True is_cross_attention: bool = False def setup(self): config 
= self.config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.attention_softmax_in_fp32 = self.dtype is not jnp.float32 self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads kernel = jax.nn.initializers.normal(self.config.initializer_range) self.q_proj = nn.Dense(self.num_heads * self.head_dim, use_bias=config.attention_bias, dtype=self.dtype, kernel_init=kernel) self.k_proj = nn.Dense(self.num_key_value_heads * self.head_dim, use_bias=config.attention_bias, dtype=self.dtype, kernel_init=kernel) self.v_proj = nn.Dense(self.num_key_value_heads * self.head_dim, use_bias=config.attention_bias, dtype=self.dtype, kernel_init=kernel) self.o_proj = nn.Dense(self.embed_dim, use_bias=config.attention_bias, dtype=self.dtype, kernel_init=kernel) self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype='bool'), dtype='bool') self.rotary_emb = FlaxGemmaRotaryEmbedding(config, dtype=self.dtype) def _split_heads(self, hidden_states, num_heads): return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads * self.head_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') cached_key = self.variable('cache', 'cached_key', jnp.zeros, key.shape, key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape, value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, dtype=jnp.int32)) if is_initialized: (*batch_dims, max_length, num_heads, depth_per_head) = cached_key.value.shape cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors pad_mask = jnp.broadcast_to(jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) def __call__(self, hidden_states, attention_mask, position_ids, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False): query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query, self.num_heads) key = self._split_heads(key, self.num_key_value_heads) value = self._split_heads(value, self.num_key_value_heads) (key, query) = self.rotary_emb(key, query, position_ids) (query_length, key_length) = (query.shape[1], key.shape[1]) if self.has_variable('cache', 'cached_key'): mask_shift = self.variables['cache']['cache_index'] max_decoder_length = self.variables['cache']['cached_key'].shape[1] causal_mask = lax.dynamic_slice(self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] batch_size = hidden_states.shape[0] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) 
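# NOTE (comment added for exposition): at this point the padding `attention_mask` has
# been broadcast to the same (batch, 1, q_len, kv_len) shape as `causal_mask`. The
# statements that follow AND the two masks together with `combine_masks`, optionally
# splice the new keys/values into the autoregressive cache, convert the boolean mask
# into an additive bias (0 where attended, finfo(dtype).min where masked), and repeat
# the key/value heads `num_key_value_groups` times so grouped-query attention matches
# the number of query heads. A minimal, hypothetical stand-alone equivalent of the
# bias step:
#
#       import jax.numpy as jnp
#       mask = jnp.array([[True, False]])
#       bias = jnp.where(mask, 0.0, jnp.finfo(jnp.float32).min)  # additive attention bias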
attention_mask = combine_masks(attention_mask, causal_mask) dropout_rng = None if not deterministic and self.config.attention_dropout > 0.0: dropout_rng = self.make_rng('dropout') if self.has_variable('cache', 'cached_key') or init_cache: (key, value, attention_mask) = self._concatenate_to_cache(key, value, query, attention_mask) attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) key = jnp.repeat(key, repeats=self.num_key_value_groups, axis=2) value = jnp.repeat(value, repeats=self.num_key_value_groups, axis=2) attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype attn_weights = dot_product_attention_weights(query, key, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_dropout, deterministic=deterministic, dtype=attention_dtype) if self.attention_softmax_in_fp32: attn_weights = attn_weights.astype(self.dtype) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value) attn_output = self._merge_heads(attn_output) attn_output = self.o_proj(attn_output) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxGemmaMLP(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): embed_dim = self.config.hidden_size inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * embed_dim kernel_init = jax.nn.initializers.normal(self.config.initializer_range) if self.config.hidden_activation is None: logger.warning_once(f"Gemma's activation function should be approximate GeLU and not exact GeLU. Changing the activation function to `gelu_pytorch_tanh`.if you want to use the legacy `{self.config.hidden_act}`, edit the `model.config` to set `hidden_activation={self.config.hidden_act}` instead of `hidden_act`. 
See https://github.com/huggingface/transformers/pull/29402 for more details.") hidden_activation = 'gelu_pytorch_tanh' else: hidden_activation = self.config.hidden_activation self.act = ACT2FN[hidden_activation] self.gate_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) self.down_proj = nn.Dense(embed_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) self.up_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) def __call__(self, hidden_states): up_proj_states = self.up_proj(hidden_states) gate_states = self.act(self.gate_proj(hidden_states)) hidden_states = self.down_proj(up_proj_states * gate_states) return hidden_states class FlaxGemmaDecoderLayer(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.input_layernorm = FlaxGemmaRMSNorm(self.config, dtype=self.dtype) self.self_attn = FlaxGemmaAttention(self.config, dtype=self.dtype) self.post_attention_layernorm = FlaxGemmaRMSNorm(self.config, dtype=self.dtype) self.mlp = FlaxGemmaMLP(self.config, dtype=self.dtype) def __call__(self, hidden_states, attention_mask=None, position_ids=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False): residual = hidden_states hidden_states = self.input_layernorm(hidden_states) outputs = self.self_attn(hidden_states, attention_mask=attention_mask, position_ids=position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions) attn_output = outputs[0] hidden_states = residual + attn_output residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return (hidden_states,) + outputs[1:] class FlaxGemmaPreTrainedModel(FlaxPreTrainedModel): config_class = GemmaConfig base_model_prefix = 'model' module_class: nn.Module = None def __init__(self, config: GemmaConfig, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): input_ids = jnp.ones((batch_size, max_length)) attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True) return unfreeze(init_variables['cache']) @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING) def __call__(self, input_ids, attention_mask=None, 
position_ids=None, params: dict=None, past_key_values: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict (batch_size, sequence_length) = input_ids.shape if position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `position_ids` when passing `past_key_values`.') position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False outputs = self.module.apply(inputs, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), jnp.array(position_ids, dtype='i4'), not train, False, output_attentions, output_hidden_states, return_dict, rngs=rngs, mutable=mutable) if past_key_values is not None and return_dict: (outputs, past_key_values) = outputs outputs['past_key_values'] = unfreeze(past_key_values['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past_key_values) = outputs outputs = outputs[:1] + (unfreeze(past_key_values['cache']),) + outputs[1:] return outputs class FlaxGemmaLayerCollection(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.blocks = [FlaxGemmaDecoderLayer(self.config, dtype=self.dtype, name=str(i)) for i in range(self.config.num_hidden_layers)] def __call__(self, hidden_states, attention_mask=None, position_ids=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for block in self.blocks: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = block(hidden_states, attention_mask=attention_mask, position_ids=position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) outputs = (hidden_states, all_hidden_states, all_attentions) return outputs class FlaxGemmaModule(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.hidden_size = self.config.hidden_size embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range) self.embed_tokens = nn.Embed(self.config.vocab_size, self.hidden_size, embedding_init=embedding_init, dtype=self.dtype) self.layers = FlaxGemmaLayerCollection(self.config, dtype=self.dtype) self.norm = FlaxGemmaRMSNorm(self.config, dtype=self.dtype) def __call__(self, input_ids, attention_mask=None, position_ids=None, deterministic=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): input_embeds = self.embed_tokens(input_ids.astype('i4')) input_embeds = input_embeds * self.config.hidden_size ** 0.5 outputs = 
self.layers(input_embeds, position_ids=position_ids, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states = outputs[1] + (hidden_states,) outputs = (hidden_states, all_hidden_states) + outputs[2:] else: outputs = (hidden_states,) + outputs[1:] if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=outputs[1], attentions=outputs[-1]) @add_start_docstrings('The bare Gemma Model transformer outputting raw hidden-states without any specific head on top.', GEMMA_START_DOCSTRING) class FlaxGemmaModel(FlaxGemmaPreTrainedModel): module_class = FlaxGemmaModule append_call_sample_docstring(FlaxGemmaModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC) class FlaxGemmaForCausalLMModule(nn.Module): config: GemmaConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.model = FlaxGemmaModule(self.config, dtype=self.dtype) self.lm_head = nn.Dense(self.config.vocab_size, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) def __call__(self, input_ids, attention_mask=None, position_ids=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.model(input_ids, position_ids=position_ids, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_kernel = self.model.variables['params']['embed_tokens']['embedding'].T lm_logits = self.lm_head.apply({'params': {'kernel': shared_kernel}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n The Gemma Model transformer with a language modeling head (linear layer) on top.\n ', GEMMA_START_DOCSTRING) class FlaxGemmaForCausalLM(FlaxGemmaPreTrainedModel): module_class = FlaxGemmaForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array]=None): (batch_size, seq_length) = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'attention_mask': extended_attention_mask, 'position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['position_ids'] = model_kwargs['position_ids'][:, -1:] + 1 return model_kwargs append_call_sample_docstring(FlaxGemmaForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC, 
real_checkpoint=_REAL_CHECKPOINT_FOR_DOC) # File: transformers-main/src/transformers/models/gemma/modeling_gemma.py import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import _flash_attention_forward from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import ALL_LAYERNORM_LAYERS from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_greater_or_equal_2_10, is_torchdynamo_compiling, logging, replace_return_docstrings from .configuration_gemma import GemmaConfig logger = logging.get_logger(__name__) def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask class GemmaRMSNorm(nn.Module): def __init__(self, dim: int, eps: float=1e-06): super().__init__() self.eps = eps self.weight = nn.Parameter(torch.zeros(dim)) def _norm(self, x): return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x): output = self._norm(x.float()) output = output * (1.0 + self.weight.float()) return output.type_as(x) def extra_repr(self): return f'{tuple(self.weight.shape)}, eps={self.eps}' ALL_LAYERNORM_LAYERS.append(GemmaRMSNorm) class GemmaRotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim) self.register_buffer('inv_freq', tensor=inv_freq, persistent=False) @torch.no_grad() def forward(self, x, position_ids, seq_len=None): self.inv_freq.to(x.device) inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() return (cos.to(dtype=x.dtype), 
sin.to(dtype=x.dtype)) class GemmaMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) if config.hidden_activation is None: logger.warning_once("`config.hidden_act` is ignored, you should use `config.hidden_activation` instead.\nGemma's activation function will be set to `gelu_pytorch_tanh`. Please, use\n`config.hidden_activation` if you want to override this behaviour.\nSee https://github.com/huggingface/transformers/pull/29402 for more details.") config.hidden_activation = 'gelu_pytorch_tanh' hidden_activation = config.hidden_activation self.act_fn = ACT2FN[hidden_activation] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) class GemmaLinearScalingRotaryEmbedding(GemmaRotaryEmbedding): def forward(self, x, position_ids): position_ids = position_ids.float() / self.scaling_factor (cos, sin) = super().forward(x, position_ids) return (cos, sin) class GemmaDynamicNTKScalingRotaryEmbedding(GemmaRotaryEmbedding): def forward(self, x, position_ids): seq_len = torch.max(position_ids) + 1 if seq_len > self.max_position_embeddings: base = self.base * (self.scaling_factor * seq_len / self.max_position_embeddings - (self.scaling_factor - 1)) ** (self.dim / (self.dim - 2)) inv_freq = 1.0 / base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device) / self.dim) self.register_buffer('inv_freq', inv_freq, persistent=False) (cos, sin) = super().forward(x, position_ids) return (cos, sin) def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = q * cos + rotate_half(q) * sin k_embed = k * cos + rotate_half(k) * sin return (q_embed, k_embed) def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: (batch, num_key_value_heads, slen, head_dim) = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class GemmaAttention(nn.Module): def __init__(self, config: GemmaConfig, layer_idx: Optional[int]=None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. 
Please make sure to provide a `layer_idx` when creating this class.') self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self.scaling = 1 / math.sqrt(config.head_dim) if self.hidden_size % self.num_heads != 0: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias) self.rotary_emb = GemmaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class GemmaFlashAttention2(GemmaAttention): def __init__(self, *args, 
**kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if isinstance(past_key_value, StaticCache): raise ValueError('`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers') output_attentions = False (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, 'sliding_window', None), is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class GemmaSdpaAttention(GemmaAttention): def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: logger.warning_once('GemmaModel is using GemmaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. 
Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, :key_states.shape[-2]] if query_states.device.type == 'cuda' and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() is_causal = True if causal_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return (attn_output, None, past_key_value) GEMMA_ATTENTION_CLASSES = {'eager': GemmaAttention, 'flash_attention_2': GemmaFlashAttention2, 'sdpa': GemmaSdpaAttention} class GemmaDecoderLayer(nn.Module): def __init__(self, config: GemmaConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = GEMMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) self.mlp = GemmaMLP(config) self.input_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states = residual + 
hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs GEMMA_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`GemmaConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('The bare Gemma Model outputting raw hidden-states without any specific head on top.', GEMMA_START_DOCSTRING) class GemmaPreTrainedModel(PreTrainedModel): config_class = GemmaConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['GemmaDecoderLayer'] _skip_keys_device_placement = ['past_key_values'] _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() _CONFIG_FOR_DOC = 'GemmaConfig' GEMMA_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance, see our\n [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. 
It is used to update the cache in the correct position and to infer\n the complete sequence length.\n" @add_start_docstrings('The bare Gemma Model outputting raw hidden-states without any specific head on top.', GEMMA_START_DOCSTRING) class GemmaModel(GemmaPreTrainedModel): def __init__(self, config: GemmaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList([GemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.') use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) return_legacy_cache = False if use_cache and (not isinstance(past_key_values, Cache)) and (not self.training): return_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) hidden_states = inputs_embeds normalizer = torch.tensor(self.config.hidden_size ** 0.5, dtype=hidden_states.dtype) hidden_states = hidden_states * normalizer if use_cache and (not isinstance(past_key_values, Cache)) and (not self.training): return_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) logger.warning_once('We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. 
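# Illustrative sketch of the embedding scaling applied in GemmaModel.forward above,
# where hidden states are multiplied by sqrt(hidden_size) before the decoder stack.
# Shapes and sizes below are made up; this is a toy numeric check, not library code.
import torch

_hidden_size = 16
_inputs_embeds = torch.randn(2, 5, _hidden_size)          # (batch, seq_len, hidden_size)
_normalizer = torch.tensor(_hidden_size ** 0.5, dtype=_inputs_embeds.dtype)
_hidden_states = _inputs_embeds * _normalizer
assert torch.allclose(_hidden_states, _inputs_embeds * 4.0)  # sqrt(16) == 4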
Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/internal/generation_utils#transformers.Cache)') all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position) else: layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if return_legacy_cache: next_cache = next_cache.to_legacy_cache() if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns) def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions): causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask class GemmaForCausalLM(GemmaPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.model = GemmaModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def 
get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, num_logits_to_keep: int=0) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] if labels is None and (not is_torchdynamo_compiling()): logger.warning_once('Starting from v4.46, the `logits` model output will have the same type as the model (except at train time, where it will always be FP32)') logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :]).float() loss = None if labels is not None: logits = logits.float() shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, num_logits_to_keep=None, **kwargs): if past_key_values is not None: if inputs_embeds is not None: input_ids = input_ids[:, -cache_position.shape[0]:] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] position_ids = position_ids.clone(memory_format=torch.contiguous_format) if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None} else: model_inputs = {'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None} if isinstance(past_key_values, StaticCache) and 
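# Illustrative sketch of the causal-LM loss computed in GemmaForCausalLM.forward
# above: logits at position t are scored against the label at position t+1.
# Vocabulary size and shapes here are made up; this mirrors the shift-and-flatten
# pattern only, it is not the model code.
import torch
from torch.nn import CrossEntropyLoss

_vocab_size, _batch, _seq_len = 11, 2, 6
_logits = torch.randn(_batch, _seq_len, _vocab_size)
_labels = torch.randint(0, _vocab_size, (_batch, _seq_len))

_shift_logits = _logits[..., :-1, :].contiguous().view(-1, _vocab_size)
_shift_labels = _labels[..., 1:].contiguous().view(-1)
_loss = CrossEntropyLoss()(_shift_logits, _shift_labels)
print(_loss.item())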
attention_mask.ndim == 2: if model_inputs['inputs_embeds'] is not None: (batch_size, sequence_length, _) = model_inputs['inputs_embeds'].shape device = model_inputs['inputs_embeds'].device else: (batch_size, sequence_length) = model_inputs['input_ids'].shape device = model_inputs['input_ids'].device dtype = self.lm_head.weight.dtype min_dtype = torch.finfo(dtype).min attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_length(), dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=batch_size) if num_logits_to_keep is not None: model_inputs['num_logits_to_keep'] = num_logits_to_keep model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask}) return model_inputs @add_start_docstrings('\n The Gemma Model transformer with a sequence classification head on top (linear layer).\n\n [`GemmaForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-2) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', GEMMA_START_DOCSTRING) class GemmaForSequenceClassification(GemmaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = GemmaModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model(input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = 
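# Illustrative sketch of how `prepare_inputs_for_generation` above derives
# position_ids from the attention mask (cumsum of non-pad positions minus one,
# padded slots filled with 1) and keeps only the trailing positions once a cache
# exists. The left-padded batch below is made up.
import torch

_attention_mask = torch.tensor([[0, 0, 1, 1, 1],
                                [1, 1, 1, 1, 1]])
_position_ids = _attention_mask.long().cumsum(-1) - 1
_position_ids.masked_fill_(_attention_mask == 0, 1)
print(_position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])

# With a cache already filled and a single new token, only the last column would be
# kept, as in `position_ids[:, -input_ids.shape[1]:]` above.
print(_position_ids[:, -1:])  # tensor([[2], [4]])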
sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n The Gemma Model transformer with a token classification head on top (a linear layer on top of the hidden-states\n output) e.g. for Named-Entity-Recognition (NER) tasks.\n ', GEMMA_START_DOCSTRING) class GemmaForTokenClassification(GemmaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = GemmaModel(config) if getattr(config, 'classifier_dropout', None) is not None: classifier_dropout = config.classifier_dropout elif getattr(config, 'hidden_dropout', None) is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.score = nn.Linear(config.hidden_size, config.num_labels) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(GEMMA_INPUTS_DOCSTRING) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.score(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return 
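# Illustrative sketch of the pooling used by GemmaForSequenceClassification above:
# the logits of the last non-padding token of each row are selected (first pad
# position minus one, wrapped by the sequence length). pad_token_id and shapes
# below are made up.
import torch

_pad_token_id = 0
_input_ids = torch.tensor([[5, 6, 7, 0, 0],
                           [8, 9, 3, 4, 2]])              # second row has no padding
_logits = torch.randn(2, 5, 3)                            # (batch, seq_len, num_labels)

_sequence_lengths = torch.eq(_input_ids, _pad_token_id).int().argmax(-1) - 1
_sequence_lengths = _sequence_lengths % _input_ids.shape[-1]
print(_sequence_lengths)                                  # tensor([2, 4])

_pooled_logits = _logits[torch.arange(_input_ids.shape[0]), _sequence_lengths]
print(_pooled_logits.shape)                               # torch.Size([2, 3])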
(loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/gemma/tokenization_gemma.py """""" import os from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: pass logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'tokenizer.model'} SPIECE_UNDERLINE = '▁' class GemmaTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, unk_token='', bos_token='', eos_token='', pad_token='', sp_model_kwargs: Optional[Dict[str, Any]]=None, add_bos_token=True, add_eos_token=False, clean_up_tokenization_spaces=False, use_default_system_prompt=False, spaces_between_special_tokens=False, **kwargs): self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token self.vocab_file = vocab_file self.add_bos_token = add_bos_token self.add_eos_token = add_eos_token self.use_default_system_prompt = use_default_system_prompt self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, sp_model_kwargs=self.sp_model_kwargs, clean_up_tokenization_spaces=clean_up_tokenization_spaces, use_default_system_prompt=use_default_system_prompt, spaces_between_special_tokens=spaces_between_special_tokens, **kwargs) def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None state['sp_model_proto'] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text, **kwargs): return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): token = self.sp_model.IdToPiece(index) return token def _decode(self, token_ids: List[int], skip_special_tokens: bool=False, spaces_between_special_tokens: bool=False, **kwargs) -> str: sub_texts = [] current_sub_text = [] for ids in token_ids: if skip_special_tokens and ids in self.all_special_ids: continue if ids in self._added_tokens_decoder: if current_sub_text: sub_texts.append(self.sp_model.decode(current_sub_text)) sub_texts.append(self._added_tokens_decoder[ids].content) current_sub_text = [] else: current_sub_text.append(ids) if current_sub_text: 
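# Illustrative usage sketch for the SentencePiece-based GemmaTokenizer above, which
# optionally prepends BOS / appends EOS around the encoded pieces. The local path
# below is hypothetical; a real `tokenizer.model` file is needed for this to run.
from transformers import GemmaTokenizer

_tokenizer = GemmaTokenizer("path/to/tokenizer.model", add_bos_token=True)  # hypothetical path
_ids = _tokenizer.encode("Hello world")       # BOS id is prepended because add_bos_token=True
_text = _tokenizer.decode(_ids, skip_special_tokens=True)
print(_ids, _text)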
sub_texts.append(self.sp_model.decode(current_sub_text)) if spaces_between_special_tokens: sub_texts = ' '.join(sub_texts) else: sub_texts = ''.join(sub_texts) return sub_texts.replace(SPIECE_UNDERLINE, ' ') def convert_tokens_to_string(self, tokens): current_sub_tokens = [] out_string = '' for token in tokens: if token in self._added_tokens_encoder: out_string += self.sp_model.decode(current_sub_tokens) + token current_sub_tokens = [] else: current_sub_tokens.append(token) out_string += self.sp_model.decode(current_sub_tokens) return out_string def save_vocabulary(self, save_directory, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + [0] * len(token_ids_0) + eos_token_id return bos_token_id + [0] * len(token_ids_0) + eos_token_id + bos_token_id + [0] * len(token_ids_1) + eos_token_id def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) if token_ids_1 is not None: output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) return output # File: transformers-main/src/transformers/models/gemma/tokenization_gemma_fast.py import os from shutil import copyfile from typing import Optional, Tuple from tokenizers import processors from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging from ...utils.versions import require_version require_version('tokenizers>=0.13.3') if is_sentencepiece_available(): from .tokenization_gemma import GemmaTokenizer else: GemmaTokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'tokenizer.model', 'tokenizer_file': 'tokenizer.json'} class GemmaTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = GemmaTokenizer padding_side = 'left' model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, 
unk_token='', bos_token='', eos_token='', pad_token='', add_bos_token=True, add_eos_token=False, **kwargs): super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, **kwargs) self._add_bos_token = add_bos_token self._add_eos_token = add_eos_token self.update_post_processor() self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def update_post_processor(self): bos = self.bos_token bos_token_id = self.bos_token_id if bos is None and self.add_bos_token: raise ValueError('add_bos_token = True but bos_token = None') eos = self.eos_token eos_token_id = self.eos_token_id if eos is None and self.add_eos_token: raise ValueError('add_eos_token = True but eos_token = None') single = f"{(bos + ':0 ' if self.add_bos_token else '')}$A:0{(' ' + eos + ':0' if self.add_eos_token else '')}" pair = f"{single}{(' ' + bos + ':1' if self.add_bos_token else '')} $B:1{(' ' + eos + ':1' if self.add_eos_token else '')}" special_tokens = [] if self.add_bos_token: special_tokens.append((bos, bos_token_id)) if self.add_eos_token: special_tokens.append((eos, eos_token_id)) self._tokenizer.post_processor = processors.TemplateProcessing(single=single, pair=pair, special_tokens=special_tokens) @property def add_eos_token(self): return self._add_eos_token @property def add_bos_token(self): return self._add_bos_token @add_eos_token.setter def add_eos_token(self, value): self._add_eos_token = value self.update_post_processor() @add_bos_token.setter def add_bos_token(self, value): self._add_bos_token = value self.update_post_processor() def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.') if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output # File: transformers-main/src/transformers/models/gemma2/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_gemma2': ['Gemma2Config']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_gemma2'] = ['Gemma2ForCausalLM', 'Gemma2Model', 'Gemma2PreTrainedModel', 'Gemma2ForSequenceClassification', 'Gemma2ForTokenClassification'] if TYPE_CHECKING: from .configuration_gemma2 import Gemma2Config try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except 
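# Illustrative sketch of the template construction performed by
# `update_post_processor` above, which rebuilds a `tokenizers` TemplateProcessing
# post-processor from the add_bos_token / add_eos_token flags. The "<bos>" string
# and id 2 below are illustrative assumptions, not values read from a checkpoint.
from tokenizers import processors

_bos, _bos_id = "<bos>", 2
_single = f"{_bos}:0 $A:0"                    # prepend BOS to a single sequence
_pair = f"{_single} {_bos}:1 $B:1"            # and to the second segment of a pair
_post_processor = processors.TemplateProcessing(
    single=_single,
    pair=_pair,
    special_tokens=[(_bos, _bos_id)],
)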
OptionalDependencyNotAvailable: pass else: from .modeling_gemma2 import Gemma2ForCausalLM, Gemma2ForSequenceClassification, Gemma2ForTokenClassification, Gemma2Model, Gemma2PreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/gemma2/configuration_gemma2.py from transformers import PretrainedConfig class Gemma2Config(PretrainedConfig): model_type = 'gemma2' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=256000, hidden_size=3072, intermediate_size=24576, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_activation='gelu_pytorch_tanh', max_position_embeddings=8192, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, final_logit_softcapping=30.0, attn_logit_softcapping=50.0, query_pre_attn_scalar=224, sliding_window=4096, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.head_dim = head_dim self.num_key_value_heads = num_key_value_heads self.hidden_activation = hidden_activation self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.attn_logit_softcapping = attn_logit_softcapping super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) self.final_logit_softcapping = final_logit_softcapping self.query_pre_attn_scalar = query_pre_attn_scalar self.sliding_window = sliding_window self.cache_implementation = 'hybrid' # File: transformers-main/src/transformers/models/gemma2/convert_gemma2_weights_to_hf.py import argparse import os import warnings import torch from accelerate import init_empty_weights from transformers import Gemma2Config, Gemma2ForCausalLM, GemmaTokenizer try: from transformers import GemmaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn('The converted tokenizer will be the `slow` tokenizer. 
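# Illustrative sketch: Gemma2Config above exposes the Gemma2-specific knobs
# (head_dim, logit softcapping, query_pre_attn_scalar, sliding_window) and pins
# cache_implementation to 'hybrid'. A tiny configuration for smoke tests; every
# size below is made up and far smaller than the released models.
from transformers import Gemma2Config

_tiny_config = Gemma2Config(
    vocab_size=512,
    hidden_size=64,
    intermediate_size=128,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_key_value_heads=2,
    head_dim=16,
    sliding_window=8,
)
print(_tiny_config.cache_implementation)  # 'hybrid'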
To use the fast, update your `tokenizers` library and re-run the tokenizer conversion') GemmaTokenizerFast = None '' gemma_9b_config = Gemma2Config(num_hidden_layers=42, num_attention_heads=16, num_key_value_heads=8, hidden_size=3584, intermediate_size=14336, final_logit_softcapping=30.0, attn_logit_softcapping=50.0, head_dim=256, sliding_window=4096, query_pre_attn_scalar=224) gemma_27b_config = Gemma2Config(num_hidden_layers=46, num_attention_heads=32, num_key_value_heads=16, hidden_size=4608, intermediate_size=36864, final_logit_softcapping=30.0, attn_logit_softcapping=50.0, head_dim=128, sliding_window=4096, query_pre_attn_scalar=144) CONFIG_MAPPING = {'9B': gemma_9b_config, '27B': gemma_27b_config} LAYER_NAME_MAPPING = {'embedder.weight': 'model.embed_tokens.weight'} def write_model(save_path, input_base_path, config, safe_serialization=True, push_to_hub=False, dtype=torch.float32): num_attn_heads = config.num_attention_heads hidden_size = config.hidden_size num_kv_heads = config.num_key_value_heads head_dim = config.head_dim print(f"Fetching all parameters from the checkpoint at '{input_base_path}'") if os.path.isdir(input_base_path): print('Model seems sharded') model_state_dict = {} files = [file for file in os.listdir(input_base_path) if file.endswith('.bin')] for file in files: print(file) loaded_state_dict = torch.load(os.path.join(input_base_path, file), map_location='cpu') model_state_dict.update(loaded_state_dict) else: print('Model does not seem to be sharded') model_state_dict = torch.load(input_base_path, map_location='cpu')['model_state_dict'] model_state_dict.pop('freqs_cis') state_dict = {} for (k, v) in model_state_dict.items(): if 'qkv_proj' in k: if num_kv_heads == 1: v = v.reshape(num_attn_heads + num_kv_heads * 2, head_dim, hidden_size) q_proj = v[:num_attn_heads, ...] 
k_proj = v[num_attn_heads:num_attn_heads + num_kv_heads, ...].repeat(num_kv_heads, 1, 1) v_proj = v[-num_kv_heads:, ...].repeat(num_kv_heads, 1, 1) state_dict[k.replace('qkv_proj', 'q_proj')] = q_proj.reshape(num_attn_heads * head_dim, hidden_size).clone() state_dict[k.replace('qkv_proj', 'k_proj')] = k_proj.reshape(num_kv_heads * head_dim, hidden_size).clone() state_dict[k.replace('qkv_proj', 'v_proj')] = v_proj[0].clone() else: (q_proj, k_proj, v_proj) = torch.split(v, [num_attn_heads * head_dim, num_kv_heads * head_dim, num_kv_heads * head_dim], 0) state_dict[k.replace('qkv_proj', 'q_proj')] = q_proj.reshape(num_attn_heads * head_dim, hidden_size).clone() state_dict[k.replace('qkv_proj', 'k_proj')] = k_proj.reshape(num_kv_heads * head_dim, hidden_size).clone() state_dict[k.replace('qkv_proj', 'v_proj')] = v_proj.reshape(num_kv_heads * head_dim, hidden_size).clone() elif k == 'embedder.weight': state_dict[LAYER_NAME_MAPPING[k]] = v state_dict['lm_head.weight'] = v else: state_dict[k] = v torch.set_default_dtype(dtype) print('Loading the checkpoint in a Gemma2 model.') with init_empty_weights(): model = Gemma2ForCausalLM(config) model.load_state_dict(state_dict, assign=True, strict=False) model.config.torch_dtype = torch.float32 del model.config._name_or_path print('Saving in the Transformers format.') if push_to_hub: print(f'pushing the model to {save_path}') model.push_to_hub(save_path, safe_serialization=safe_serialization, private=True) else: model.save_pretrained(save_path, safe_serialization=safe_serialization) def write_tokenizer(input_tokenizer_path, save_path, push_to_hub=False): tokenizer_class = GemmaTokenizer if GemmaTokenizerFast is None else GemmaTokenizerFast print(f'Saving a {tokenizer_class.__name__} to {save_path}.') tokenizer = tokenizer_class(input_tokenizer_path) if push_to_hub: tokenizer.push_to_hub(save_path) else: tokenizer.save_pretrained(save_path) def main(): parser = argparse.ArgumentParser() parser.add_argument('--input_checkpoint', help='Absolute path to the target Gemma2 weights.', required=True) parser.add_argument('--tokenizer_checkpoint', help='Location of Gemma2 tokenizer model') parser.add_argument('--model_size', default='9B', choices=['9B', '27B', 'tokenizer_only'], help="'f' models correspond to the finetuned versions, and are specific to the Gemma22 official release. 
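# Illustrative sketch of the fused-QKV split performed by `write_model` above for
# grouped-query attention: a weight of shape
# (num_q_heads*head_dim + 2*num_kv_heads*head_dim, hidden_size) is split into
# separate q/k/v projections along dim 0. Sizes below are made up.
import torch

_num_attn_heads, _num_kv_heads, _head_dim, _hidden_size = 4, 2, 8, 16
_fused = torch.randn((_num_attn_heads + 2 * _num_kv_heads) * _head_dim, _hidden_size)

_q_proj, _k_proj, _v_proj = torch.split(
    _fused,
    [_num_attn_heads * _head_dim, _num_kv_heads * _head_dim, _num_kv_heads * _head_dim],
    dim=0,
)
print(_q_proj.shape, _k_proj.shape, _v_proj.shape)
# torch.Size([32, 16]) torch.Size([16, 16]) torch.Size([16, 16])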
For more details on Gemma2, checkout the original repo: https://huggingface.co/google/gemma-7b") parser.add_argument('--output_dir', default='google/gemma-9b', help='Location to write HF model and tokenizer') parser.add_argument('--pickle_serialization', help='Whether or not to save using `safetensors`.', action='store_true', default=False) parser.add_argument('--convert_tokenizer', help='Whether or not to convert the tokenizer as well.', action='store_true', default=False) parser.add_argument('--push_to_hub', help='Whether or not to push the model to the hub at `output_dir` instead of saving it locally.', action='store_true', default=False) parser.add_argument('--dtype', default='float32', help='Target dtype of the converted model') args = parser.parse_args() if args.convert_tokenizer: if args.tokenizer_checkpoint is None: raise ValueError('Path to the tokenizer is required when passing --convert_tokenizer') spm_path = os.path.join(args.tokenizer_checkpoint) write_tokenizer(spm_path, args.output_dir, args.push_to_hub) if not args.model_size == 'tokenizer_only': config = CONFIG_MAPPING[args.model_size] dtype = getattr(torch, args.dtype) write_model(config=config, input_base_path=args.input_checkpoint, save_path=args.output_dir, safe_serialization=not args.pickle_serialization, push_to_hub=args.push_to_hub, dtype=dtype) if __name__ == '__main__': main() # File: transformers-main/src/transformers/models/gemma2/diff_gemma2.py from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch.nn import CrossEntropyLoss from transformers.models.gemma.configuration_gemma import GemmaConfig from transformers.models.gemma.modeling_gemma import GemmaAttention, GemmaDecoderLayer, GemmaForCausalLM, GemmaForSequenceClassification, GemmaForTokenClassification, GemmaModel, GemmaRMSNorm, apply_rotary_pos_emb, repeat_kv from ...cache_utils import Cache from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) class Gemma2Config(GemmaConfig): cache_implementation = 'hybrid' def __init__(self, query_pre_attn_scalar=224, sliding_window=4096, final_logit_softcapping=30.0, **super_kwargs): super().__init__(self, **super_kwargs) self.query_pre_attn_scalar = query_pre_attn_scalar self.sliding_window = sliding_window self.cache_implementation = 'hybrid' self.final_logit_softcapping = final_logit_softcapping class Gemma2RMSNorm(GemmaRMSNorm): pass class Gemma2Attention(GemmaAttention): def __init__(self, config: Gemma2Config, layer_idx: Optional[int]=None): super().__init__(config, layer_idx) self.scaling = config.query_pre_attn_scalar ** (-0.5) class Gemma2FlashAttention2(Gemma2Attention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: output_attentions = False (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = 
self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, softmax_scale=self.scaling, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class Gemma2SdpaAttention(Gemma2Attention): def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: logger.warning_once('Gemma2Model is using Gemma2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, :key_states.shape[-2]] if query_states.device.type == 'cuda' and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() is_causal = True if causal_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal, scale=self.scaling) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return (attn_output, None, past_key_value) class Gemma2DecoderLayer(GemmaDecoderLayer): def __init__(self, config: Gemma2Config, layer_idx: int): super().__init__(config, layer_idx) self.is_sliding = bool(layer_idx % 2) self.pre_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.sliding_window = config.sliding_window def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: if self.is_sliding and attention_mask is not None: attention_mask = attention_mask * torch.tril(torch.ones_like(attention_mask), diagonal=self.sliding_window - cache_position[-1]) if cache_position[0] > 0: attention_mask = attention_mask[:, -self.sliding_window:] residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = residual + 
hidden_states residual = hidden_states hidden_states = self.pre_feedforward_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = self.post_feedforward_layernorm(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class Gemma2Model(GemmaModel): def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.') use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: cache_position = torch.arange(0, inputs_embeds.shape[1], device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) hidden_states = inputs_embeds normalizer = torch.tensor(self.config.hidden_size ** 0.5, dtype=hidden_states.dtype) hidden_states = hidden_states * normalizer all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position) else: layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = past_key_values if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns) @torch.no_grad() def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, 
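# Illustrative sketch of the banding expression used by the sliding layers above,
# which multiply the attention mask by
# `torch.tril(torch.ones_like(mask), diagonal=sliding_window - cache_position[-1])`.
# A neutral toy evaluation on a small 0/1 mask (window size and lengths made up),
# just to show which entries survive.
import torch

_sliding_window = 3
_cache_position = torch.arange(5)                  # pretend positions 0..4 are being processed
_mask = torch.ones(5, 5)                           # stand-in for the (query, key) slice of the mask
_band = torch.tril(torch.ones_like(_mask), diagonal=_sliding_window - _cache_position[-1])
print(_mask * _band)
# tensor([[0., 0., 0., 0., 0.],
#         [1., 0., 0., 0., 0.],
#         [1., 1., 0., 0., 0.],
#         [1., 1., 1., 0., 0.],
#         [1., 1., 1., 1., 0.]])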
output_attentions: bool): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if past_key_values is not None: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask class Gemma2ForCausalLM(GemmaForCausalLM): def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] logits = self.lm_head(hidden_states) if self.config.final_logit_softcapping is not None: logits = logits / self.config.final_logit_softcapping logits = torch.tanh(logits) logits = logits * self.config.final_logit_softcapping logits = logits.float() loss = None if labels is not None: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class Gemma2ForSequenceClassification(GemmaForSequenceClassification): pass class Gemma2ForTokenClassification(GemmaForTokenClassification): pass # File: 
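# Illustrative sketch of the final-logit softcapping applied in
# Gemma2ForCausalLM.forward above: divide by the cap, tanh, multiply back, which
# squashes every logit into (-cap, cap). The cap value matches the config default
# (30.0); tensor shapes are made up.
import torch

_final_logit_softcapping = 30.0
_logits = torch.randn(2, 4, 8) * 100.0             # deliberately large values

_capped = _logits / _final_logit_softcapping
_capped = torch.tanh(_capped)
_capped = _capped * _final_logit_softcapping
assert _capped.abs().max() < _final_logit_softcapping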
transformers-main/src/transformers/models/gemma2/modeling_gemma2.py from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, HybridCache from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal, is_flash_attn_greater_or_equal_2_10, is_torchdynamo_compiling, logging, replace_return_docstrings from .configuration_gemma2 import Gemma2Config if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask class Gemma2RMSNorm(nn.Module): def __init__(self, dim: int, eps: float=1e-06): super().__init__() self.eps = eps self.weight = nn.Parameter(torch.zeros(dim)) def _norm(self, x): return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x): output = self._norm(x.float()) output = output * (1.0 + self.weight.float()) return output.type_as(x) def extra_repr(self): return f'{tuple(self.weight.shape)}, eps={self.eps}' class Gemma2RotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim) self.register_buffer('inv_freq', tensor=inv_freq, persistent=False) @torch.no_grad() def forward(self, x, position_ids, seq_len=None): self.inv_freq.to(x.device) inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)) def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), 
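# Illustrative sketch of the Gemma2RMSNorm formula above: normalize by the
# root-mean-square of the features and scale by (1 + weight), with the weight
# parameter initialized to zeros. A toy numeric check with made-up shapes.
import torch

_dim, _eps = 8, 1e-6
_weight = torch.zeros(_dim)                         # as in Gemma2RMSNorm.__init__
_x = torch.randn(2, 3, _dim)

_rms = torch.rsqrt(_x.pow(2).mean(-1, keepdim=True) + _eps)
_manual = (_x * _rms) * (1.0 + _weight)

# With the zero-initialized weight, the output is simply x normalized by its RMS.
assert torch.allclose(_manual, _x * _rms)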
dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = q * cos + rotate_half(q) * sin k_embed = k * cos + rotate_half(k) * sin return (q_embed, k_embed) class Gemma2MLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_activation] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: (batch, num_key_value_heads, slen, head_dim) = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class Gemma2Attention(nn.Module): def __init__(self, config: Gemma2Config, layer_idx: Optional[int]=None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self.scaling = config.query_pre_attn_scalar ** (-0.5) if self.hidden_size % self.num_heads != 0: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias) self.rotary_emb = Gemma2RotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta) self.sliding_window = config.sliding_window if not bool(layer_idx % 2) else None def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = 
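# Illustrative sketch of `repeat_kv` above, which expands the key/value heads so
# that each group of query heads gets its own copy (grouped-query attention).
# The function body mirrors the one above; sizes are made up (2 KV heads shared by
# 4 query heads -> n_rep = 2).
import torch

def _repeat_kv_sketch(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # Same logic as repeat_kv above: repeat along a new axis, then fold it into the head dim.
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

_key_states = torch.randn(1, 2, 5, 8)               # (batch, kv_heads, seq, head_dim)
_expanded = _repeat_kv_sketch(_key_states, n_rep=2)
print(_expanded.shape)                               # torch.Size([1, 4, 5, 8])
assert torch.equal(_expanded[:, 0], _expanded[:, 1]) # heads 0 and 1 share the same KV head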
key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'sliding_window': self.sliding_window, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling if self.config.attn_logit_softcapping is not None: attn_weights = attn_weights / self.config.attn_logit_softcapping attn_weights = torch.tanh(attn_weights) attn_weights = attn_weights * self.config.attn_logit_softcapping if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class Gemma2FlashAttention2(Gemma2Attention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: output_attentions = False (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'sliding_window': self.sliding_window, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) if attention_mask is not None: seq_len = attention_mask.shape[1] key_states = key_states[:, :, :seq_len] value_states = value_states[:, :, :seq_len] query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 
2) dropout_rate = self.attention_dropout if self.training else 0.0 input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, softmax_scale=self.scaling, is_causal=self.is_causal, sliding_window=self.sliding_window, use_top_left_mask=self._flash_attn_uses_top_left_mask, softcap=self.config.attn_logit_softcapping if is_flash_attn_greater_or_equal('2.6.0') else None) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class Gemma2SdpaAttention(Gemma2Attention): def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: logger.warning_once('Gemma2Model is using Gemma2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = self.rotary_emb(value_states, position_ids) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'sliding_window': self.sliding_window, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, :key_states.shape[-2]] if query_states.device.type == 'cuda' and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() is_causal = True if causal_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal, scale=self.scaling) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return (attn_output, None, past_key_value) GEMMA2_ATTENTION_CLASSES = {'eager': Gemma2Attention, 'flash_attention_2': Gemma2FlashAttention2, 'sdpa': Gemma2SdpaAttention} class Gemma2DecoderLayer(nn.Module): def __init__(self, config: Gemma2Config, layer_idx: int): super().__init__() self.config = config self.hidden_size = config.hidden_size self.self_attn = GEMMA2_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) self.mlp = Gemma2MLP(config) self.input_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.is_sliding = not bool(layer_idx % 2) self.pre_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_feedforward_layernorm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.sliding_window = config.sliding_window def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: if self.is_sliding and attention_mask is not None: if self.config._attn_implementation == 'flash_attention_2': if past_key_value is not None: attention_mask = attention_mask[:, -self.sliding_window:] 
else: min_dtype = torch.finfo(hidden_states.dtype).min sliding_window_mask = torch.tril(torch.ones_like(attention_mask, dtype=torch.bool), diagonal=-self.sliding_window) attention_mask = torch.where(sliding_window_mask, min_dtype, attention_mask) if attention_mask.shape[-1] <= 1: attention_mask = attention_mask[:, :, :, -self.sliding_window:] residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.pre_feedforward_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = self.post_feedforward_layernorm(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs GEMMA2_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`Gemma2Config`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('The bare Gemma2 Model outputting raw hidden-states without any specific head on top.', GEMMA2_START_DOCSTRING) class Gemma2PreTrainedModel(PreTrainedModel): config_class = Gemma2Config base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['Gemma2DecoderLayer'] _skip_keys_device_placement = ['past_key_values'] _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True _supports_quantized_cache = False _supports_static_cache = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @classmethod def _check_and_enable_sdpa(cls, config, hard_check_only: bool=False): config = super()._check_and_enable_sdpa(config, hard_check_only=hard_check_only) if not hard_check_only and config._attn_implementation == 'sdpa': config._attn_implementation = 'eager' return config _CONFIG_FOR_DOC = 'Gemma2Config' GEMMA2_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance, see our\n [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. It is used to update the cache in the correct position and to infer\n the complete sequence length.\n" @add_start_docstrings('The bare Gemma2 Model outputting raw hidden-states without any specific head on top.', GEMMA2_START_DOCSTRING) class Gemma2Model(Gemma2PreTrainedModel): def __init__(self, config: Gemma2Config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList([Gemma2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.norm = Gemma2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.') use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: if past_key_values is None: cache_position = torch.arange(0, inputs_embeds.shape[1], device=inputs_embeds.device) else: raise ValueError('When `past_key_values` is passed, `cache_position` must be too') if use_cache and past_key_values is None and (not self.training): logger.warning_once("You are calling the model with `use_cache=True` but didn't pass `past_key_values` while not training. ", 'If you want to compute with cache, make sure to pass an instance of `HybridCache`. An empty `HybridCache` instance will be created for this call. 
See for more: (https://huggingface.co/docs/transformers/main/en/internal/generation_utils#transformers.HybridCache)') (batch_size, seq_len, _) = inputs_embeds.shape past_key_values = HybridCache(self.config, batch_size=batch_size, max_cache_len=seq_len, device=self.device, dtype=inputs_embeds.dtype) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) hidden_states = inputs_embeds normalizer = torch.tensor(self.config.hidden_size ** 0.5, dtype=hidden_states.dtype) hidden_states = hidden_states * normalizer all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position) else: layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = past_key_values if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns) def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool): if self.config._attn_implementation == 'flash_attention_2': return attention_mask (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if isinstance(past_key_values, HybridCache): target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if attention_mask is not None else input_tensor.shape[1] causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) return causal_mask class Gemma2ForCausalLM(Gemma2PreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.model = Gemma2Model(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: 
torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, num_logits_to_keep: int=0) -> Union[Tuple, CausalLMOutputWithPast]: if self.training and self.config._attn_implementation != 'eager': logger.warning_once(f"It is strongly recommended to train Gemma2 models with the `eager` attention implementation instead of `{self.config._attn_implementation}`. Use `eager` with `AutoModelForCausalLM.from_pretrained('', attn_implementation='eager')`.") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] if labels is None and (not is_torchdynamo_compiling()): logger.warning_once('Starting from v4.46, the `logits` model output will have the same type as the model (except at train time, where it will always be FP32)') logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :]) if self.config.final_logit_softcapping is not None: logits = logits / self.config.final_logit_softcapping logits = torch.tanh(logits) logits = logits * self.config.final_logit_softcapping logits = logits.float() loss = None if labels is not None: logits = logits.float() shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, num_logits_to_keep=None, **kwargs): if past_key_values is not None: if inputs_embeds is not None: input_ids = input_ids[:, -cache_position.shape[0]:] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] position_ids = position_ids.clone(memory_format=torch.contiguous_format) if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None} else: model_inputs = {'input_ids': 
input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None} if isinstance(past_key_values, HybridCache) and attention_mask.ndim == 2 and (not self.config._attn_implementation == 'flash_attention_2'): if model_inputs['inputs_embeds'] is not None: (batch_size, sequence_length, _) = model_inputs['inputs_embeds'].shape device = model_inputs['inputs_embeds'].device else: (batch_size, sequence_length) = model_inputs['input_ids'].shape device = model_inputs['input_ids'].device dtype = self.lm_head.weight.dtype min_dtype = torch.finfo(dtype).min attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_length(), dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=batch_size) if num_logits_to_keep is not None: model_inputs['num_logits_to_keep'] = num_logits_to_keep model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask}) return model_inputs @add_start_docstrings('\n The Gemma2 Model transformer with a sequence classification head on top (linear layer).\n\n [`Gemma2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-2) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', GEMMA2_START_DOCSTRING) class Gemma2ForSequenceClassification(Gemma2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = Gemma2Model(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model(input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') if 
self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n The Gemma2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states\n output) e.g. for Named-Entity-Recognition (NER) tasks.\n ', GEMMA2_START_DOCSTRING) class Gemma2ForTokenClassification(Gemma2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = Gemma2Model(config) if getattr(config, 'classifier_dropout', None) is not None: classifier_dropout = config.classifier_dropout elif getattr(config, 'hidden_dropout', None) is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.score = nn.Linear(config.hidden_size, config.num_labels) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value @add_start_docstrings_to_model_forward(GEMMA2_INPUTS_DOCSTRING) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.score(sequence_output) 
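# `sequence_output` is (batch_size, seq_len, hidden_size) and `self.score` projects it to
# per-token logits of shape (batch_size, seq_len, num_labels); the cross-entropy below
# flattens both tensors (logits.view(-1, num_labels) vs. labels.view(-1)) so that every
# token position is classified independently.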
loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/git/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_git': ['GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_git'] = ['GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel'] if TYPE_CHECKING: from .configuration_git import GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/git/configuration_git.py import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class GitVisionConfig(PretrainedConfig): model_type = 'git_vision_model' def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'git': config_dict = config_dict['vision_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class GitConfig(PretrainedConfig): model_type = 'git' def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs): super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs) if vision_config is None: vision_config = {} logger.info('vision_config is None. initializing the GitVisionConfig with default values.') self.vision_config = GitVisionConfig(**vision_config) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.tie_word_embeddings = tie_word_embeddings self.num_image_with_embedding = num_image_with_embedding self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id # File: transformers-main/src/transformers/models/git/convert_git_to_pytorch.py """""" import argparse from pathlib import Path import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import AutoTokenizer, CLIPImageProcessor, GitConfig, GitForCausalLM, GitProcessor, GitVisionConfig, VideoMAEImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_git_config(model_name): if 'base' in model_name and 'vqa' in model_name: image_size = 480 elif 'large' in model_name and 'vqa' in model_name: image_size = 420 else: image_size = 224 vision_config = GitVisionConfig(image_size=image_size) if 'large' in model_name: vision_config.patch_size = 14 vision_config.hidden_size = 1024 vision_config.intermediate_size = 4096 vision_config.num_hidden_layers = 24 vision_config.num_attention_heads = 16 is_video = 'vatex' in model_name or 'msrvtt' in model_name num_image_with_embedding = 6 if is_video else None config = GitConfig(vision_config=vision_config.to_dict(), num_image_with_embedding=num_image_with_embedding) return (config, image_size, is_video) def create_rename_keys(config, prefix=''): rename_keys = [] rename_keys.append((f'{prefix}image_encoder.class_embedding', 'git.image_encoder.vision_model.embeddings.class_embedding')) rename_keys.append((f'{prefix}image_encoder.positional_embedding', 'git.image_encoder.vision_model.embeddings.position_embedding.weight')) rename_keys.append((f'{prefix}image_encoder.conv1.weight', 'git.image_encoder.vision_model.embeddings.patch_embedding.weight')) rename_keys.append((f'{prefix}image_encoder.ln_pre.weight', 'git.image_encoder.vision_model.pre_layrnorm.weight')) 
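# Each tuple is (key_in_original_GIT_checkpoint, key_in_HF_model): the CLIP-style vision
# tower keys (class/position embeddings, patch conv, ln_pre/ln_post, per-layer attention
# and MLP weights) are remapped onto `git.image_encoder.vision_model.*`, and
# `image_encoder.proj` becomes the visual projection weight, which `rename_key` transposes.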
rename_keys.append((f'{prefix}image_encoder.ln_pre.bias', 'git.image_encoder.vision_model.pre_layrnorm.bias')) rename_keys.append((f'{prefix}image_encoder.ln_post.weight', 'git.image_encoder.vision_model.post_layernorm.weight')) rename_keys.append((f'{prefix}image_encoder.ln_post.bias', 'git.image_encoder.vision_model.post_layernorm.bias')) rename_keys.append((f'{prefix}image_encoder.proj', 'git.image_encoder.visual_projection.weight')) for i in range(config.vision_config.num_hidden_layers): rename_keys.append((f'{prefix}image_encoder.transformer.resblocks.{i}.attn.out_proj.weight', f'git.image_encoder.vision_model.encoder.layers.{i}.self_attn.out_proj.weight')) rename_keys.append((f'{prefix}image_encoder.transformer.resblocks.{i}.attn.out_proj.bias', f'git.image_encoder.vision_model.encoder.layers.{i}.self_attn.out_proj.bias')) rename_keys.append((f'{prefix}image_encoder.transformer.resblocks.{i}.ln_1.weight', f'git.image_encoder.vision_model.encoder.layers.{i}.layer_norm1.weight')) rename_keys.append((f'{prefix}image_encoder.transformer.resblocks.{i}.ln_1.bias', f'git.image_encoder.vision_model.encoder.layers.{i}.layer_norm1.bias')) rename_keys.append((f'{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_fc.weight', f'git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc1.weight')) rename_keys.append((f'{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_fc.bias', f'git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc1.bias')) rename_keys.append((f'{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_proj.weight', f'git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc2.weight')) rename_keys.append((f'{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_proj.bias', f'git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc2.bias')) rename_keys.append((f'{prefix}image_encoder.transformer.resblocks.{i}.ln_2.weight', f'git.image_encoder.vision_model.encoder.layers.{i}.layer_norm2.weight')) rename_keys.append((f'{prefix}image_encoder.transformer.resblocks.{i}.ln_2.bias', f'git.image_encoder.vision_model.encoder.layers.{i}.layer_norm2.bias')) rename_keys.append((f'{prefix}textual.embedding.words.weight', 'git.embeddings.word_embeddings.weight')) rename_keys.append((f'{prefix}textual.embedding.positions.weight', 'git.embeddings.position_embeddings.weight')) rename_keys.append((f'{prefix}textual.visual_projection.0.weight', 'git.visual_projection.visual_projection.0.weight')) rename_keys.append((f'{prefix}textual.visual_projection.0.bias', 'git.visual_projection.visual_projection.0.bias')) rename_keys.append((f'{prefix}textual.visual_projection.1.weight', 'git.visual_projection.visual_projection.1.weight')) rename_keys.append((f'{prefix}textual.visual_projection.1.bias', 'git.visual_projection.visual_projection.1.bias')) rename_keys.append((f'{prefix}textual.embedding.layer_norm.weight', 'git.embeddings.LayerNorm.weight')) rename_keys.append((f'{prefix}textual.embedding.layer_norm.bias', 'git.embeddings.LayerNorm.bias')) rename_keys.append((f'{prefix}textual.output.weight', 'output.weight')) rename_keys.append((f'{prefix}textual.output.bias', 'output.bias')) for i in range(config.num_hidden_layers): rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.attention.self.query.weight', f'git.encoder.layer.{i}.attention.self.query.weight')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.attention.self.query.bias', f'git.encoder.layer.{i}.attention.self.query.bias')) 
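# Within this loop the text decoder keys `textual.transformer.encoder.layer.{i}.*` keep
# their BERT-style suffixes (attention.self.query/key/value, attention.output,
# intermediate.dense, output.dense/LayerNorm) and are only re-prefixed to
# `git.encoder.layer.{i}.*`.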
rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.attention.self.key.weight', f'git.encoder.layer.{i}.attention.self.key.weight')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.attention.self.key.bias', f'git.encoder.layer.{i}.attention.self.key.bias')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.attention.self.value.weight', f'git.encoder.layer.{i}.attention.self.value.weight')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.attention.self.value.bias', f'git.encoder.layer.{i}.attention.self.value.bias')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.attention.output.dense.weight', f'git.encoder.layer.{i}.attention.output.dense.weight')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.attention.output.dense.bias', f'git.encoder.layer.{i}.attention.output.dense.bias')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.attention.output.LayerNorm.weight', f'git.encoder.layer.{i}.attention.output.LayerNorm.weight')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.attention.output.LayerNorm.bias', f'git.encoder.layer.{i}.attention.output.LayerNorm.bias')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.intermediate.dense.weight', f'git.encoder.layer.{i}.intermediate.dense.weight')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.intermediate.dense.bias', f'git.encoder.layer.{i}.intermediate.dense.bias')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.output.dense.weight', f'git.encoder.layer.{i}.output.dense.weight')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.output.dense.bias', f'git.encoder.layer.{i}.output.dense.bias')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.output.LayerNorm.weight', f'git.encoder.layer.{i}.output.LayerNorm.weight')) rename_keys.append((f'{prefix}textual.transformer.encoder.layer.{i}.output.LayerNorm.bias', f'git.encoder.layer.{i}.output.LayerNorm.bias')) if config.num_image_with_embedding is not None: rename_keys.append(('img_temperal_embedding.0', 'git.img_temperal_embedding.0')) rename_keys.append(('img_temperal_embedding.1', 'git.img_temperal_embedding.1')) rename_keys.append(('img_temperal_embedding.2', 'git.img_temperal_embedding.2')) rename_keys.append(('img_temperal_embedding.3', 'git.img_temperal_embedding.3')) rename_keys.append(('img_temperal_embedding.4', 'git.img_temperal_embedding.4')) rename_keys.append(('img_temperal_embedding.5', 'git.img_temperal_embedding.5')) return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val.T if 'image_encoder.visual_projection' in new else val def read_in_q_k_v(state_dict, config, prefix=''): dim = config.vision_config.hidden_size for i in range(config.vision_config.num_hidden_layers): in_proj_weight = state_dict.pop(f'{prefix}image_encoder.transformer.resblocks.{i}.attn.in_proj_weight') in_proj_bias = state_dict.pop(f'{prefix}image_encoder.transformer.resblocks.{i}.attn.in_proj_bias') state_dict[f'git.image_encoder.vision_model.encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:dim, :] state_dict[f'git.image_encoder.vision_model.encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:dim] state_dict[f'git.image_encoder.vision_model.encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[dim:dim * 2, :] state_dict[f'git.image_encoder.vision_model.encoder.layers.{i}.self_attn.k_proj.bias'] = 
in_proj_bias[dim:dim * 2] state_dict[f'git.image_encoder.vision_model.encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-dim:, :] state_dict[f'git.image_encoder.vision_model.encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-dim:] def prepare_img(model_name): if 'textvqa' in model_name: filepath = hf_hub_download(repo_id='nielsr/textvqa-sample', filename='bus.png', repo_type='dataset') image = Image.open(filepath).convert('RGB') else: url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) return image def prepare_video(): from decord import VideoReader, cpu np.random.seed(0) def sample_frame_indices(clip_len, frame_sample_rate, seg_len): converted_len = int(clip_len * frame_sample_rate) end_idx = np.random.randint(converted_len, seg_len) start_idx = end_idx - converted_len indices = np.linspace(start_idx, end_idx, num=clip_len) indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) return indices file_path = hf_hub_download(repo_id='nielsr/video-demo', filename='eating_spaghetti.mp4', repo_type='dataset') videoreader = VideoReader(file_path, num_threads=1, ctx=cpu(0)) videoreader.seek(0) indices = sample_frame_indices(clip_len=6, frame_sample_rate=4, seg_len=len(videoreader)) video = videoreader.get_batch(indices).asnumpy() return video @torch.no_grad() def convert_git_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): model_name_to_url = {'git-base': 'https://publicgit.blob.core.windows.net/data/output/GIT_BASE/snapshot/model.pt', 'git-base-coco': 'https://publicgit.blob.core.windows.net/data/output/GIT_BASE_COCO/snapshot/model.pt', 'git-base-textcaps': 'https://publicgit.blob.core.windows.net/data/output/GIT_BASE_TEXTCAPS/snapshot/model.pt', 'git-base-vqav2': 'https://publicgit.blob.core.windows.net/data/output/GIT_BASE_VQAv2/snapshot/model.pt', 'git-base-textvqa': 'https://publicgit.blob.core.windows.net/data/output/GIT_BASE_TEXTVQA/snapshot/model.pt', 'git-base-vatex': 'https://publicgit.blob.core.windows.net/data/output/GIT_BASE_VATEX/snapshot/model.pt', 'git-base-msrvtt-qa': 'https://publicgit.blob.core.windows.net/data/output/GIT_BASE_MSRVTT_QA/snapshot/model.pt', 'git-large': 'https://publicgit.blob.core.windows.net/data/output/GIT_LARGE/snapshot/model.pt', 'git-large-coco': 'https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_COCO/snapshot/model.pt', 'git-large-textcaps': 'https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_TEXTCAPS/snapshot/model.pt', 'git-large-vqav2': 'https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_VQAv2/snapshot/model.pt', 'git-large-textvqa': 'https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_TEXTVQA/snapshot/model.pt', 'git-large-vatex': 'https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_VATEX/snapshot/model.pt', 'git-large-msrvtt-qa': 'https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_MSRVTT_QA/snapshot/model.pt', 'git-large-r': 'https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R/snapshot/model.pt', 'git-large-r-coco': 'https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R_COCO/snapshot/model.pt', 'git-large-r-textcaps': 'https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R_TEXTCAPS/snapshot/model.pt'} model_name_to_path = {'git-large': '/Users/nielsrogge/Documents/GIT/git_large_model.pt', 'git-large-coco': '/Users/nielsrogge/Documents/GIT/git_large_coco_model.pt', 'git-large-textcaps': 
'/Users/nielsrogge/Documents/GIT/git_large_textcaps_model.pt', 'git-large-vqav2': '/Users/nielsrogge/Documents/GIT/git_large_vqav2_model.pt', 'git-large-textvqa': '/Users/nielsrogge/Documents/GIT/git_large_textvqa_model.pt'} (config, image_size, is_video) = get_git_config(model_name) if 'large' in model_name and (not is_video) and ('large-r' not in model_name): checkpoint_path = model_name_to_path[model_name] state_dict = torch.load(checkpoint_path, map_location='cpu')['model'] else: checkpoint_url = model_name_to_url[model_name] state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu', file_name=model_name)['model'] prefix = 'module.' if model_name == 'git-base' else '' rename_keys = create_rename_keys(config, prefix=prefix) for (src, dest) in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, prefix=prefix) model = GitForCausalLM(config) (missing_keys, unexpected_keys) = model.load_state_dict(state_dict, strict=False) model.eval() print('Missing keys:', missing_keys) print('Unexpected keys:', unexpected_keys) assert missing_keys == ['git.embeddings.position_ids', 'git.image_encoder.vision_model.embeddings.position_ids'] assert unexpected_keys == ['git.image_encoder.visual_projection.weight'] image_processor = VideoMAEImageProcessor(size={'shortest_edge': image_size}, crop_size={'height': image_size, 'width': image_size}) if is_video else CLIPImageProcessor(size={'shortest_edge': image_size}, crop_size={'height': image_size, 'width': image_size}) tokenizer = AutoTokenizer.from_pretrained('google-bert/bert-base-uncased', model_input_names=['input_ids', 'attention_mask']) processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor) if is_video: video = prepare_video() pixel_values = processor(images=list(video), return_tensors='pt').pixel_values else: image = prepare_img(model_name) image_transforms = Compose([Resize(image_size, interpolation=Image.BICUBIC), CenterCrop(image_size), ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))]) original_pixel_values = image_transforms(image).unsqueeze(0) pixel_values = processor(images=image, return_tensors='pt').pixel_values assert torch.allclose(pixel_values, original_pixel_values) input_ids = torch.tensor([[101]]) outputs = model(input_ids, pixel_values=pixel_values) logits = outputs.logits print('Logits:', logits[0, -1, :3]) if model_name == 'git-base': expected_slice_logits = torch.tensor([-1.2832, -1.2835, -1.284]) elif model_name == 'git-base-coco': expected_slice_logits = torch.tensor([-0.9925, -0.993, -0.9935]) elif model_name == 'git-base-textcaps': expected_slice_logits = torch.tensor([-1.298, -1.2983, -1.2985]) elif model_name == 'git-base-vqav2': expected_slice_logits = torch.tensor([-0.857, -0.8568, -0.8561]) elif model_name == 'git-base-textvqa': expected_slice_logits = torch.tensor([-1.4085, -1.4083, -1.4082]) elif model_name == 'git-base-vatex': expected_slice_logits = torch.tensor([-1.3451, -1.3447, -1.3447]) elif model_name == 'git-base-msrvtt-qa': expected_slice_logits = torch.tensor([-0.8554, -0.855, -0.854]) elif model_name == 'git-large': expected_slice_logits = torch.tensor([-1.1708, -1.1707, -1.1705]) elif model_name == 'git-large-coco': expected_slice_logits = torch.tensor([-1.0425, -1.0423, -1.0422]) elif model_name == 'git-large-textcaps': expected_slice_logits = torch.tensor([-1.2705, -1.2708, -1.2706]) elif model_name == 'git-large-vqav2': expected_slice_logits = torch.tensor([-0.7042, -0.7043, -0.7043]) 
elif model_name == 'git-large-textvqa': expected_slice_logits = torch.tensor([-0.859, -0.8592, -0.859]) elif model_name == 'git-large-vatex': expected_slice_logits = torch.tensor([-1.0113, -1.0114, -1.0113]) elif model_name == 'git-large-msrvtt-qa': expected_slice_logits = torch.tensor([0.013, 0.0134, 0.0131]) elif model_name == 'git-large-r': expected_slice_logits = torch.tensor([-1.1283, -1.1285, -1.1286]) elif model_name == 'git-large-r-coco': expected_slice_logits = torch.tensor([-0.9641, -0.9641, -0.9641]) elif model_name == 'git-large-r-textcaps': expected_slice_logits = torch.tensor([-1.1121, -1.112, -1.1124]) assert torch.allclose(logits[0, -1, :3], expected_slice_logits, atol=0.0001) print('Looks ok!') prompt = '' if 'textvqa' in model_name: prompt = 'what does the front of the bus say at the top?' elif 'msrvtt-qa' in model_name: prompt = 'what does the woman eat?' elif 'vqa' in model_name: prompt = 'what are the cats doing?' input_ids = tokenizer(prompt, add_special_tokens=False).input_ids input_ids = [processor.tokenizer.cls_token_id] + input_ids input_ids = torch.tensor(input_ids).unsqueeze(0) print('Generating caption...') generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50) print('Generated caption:', processor.batch_decode(generated_ids, skip_special_tokens=True)) if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f'Saving model and processor of {model_name} to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f'Pushing model and processor of {model_name} to the hub...') model.push_to_hub(f'microsoft/{model_name}') processor.push_to_hub(f'microsoft/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='git-base', type=str, help="Name of the model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub.') args = parser.parse_args() convert_git_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/git/modeling_git.py """""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...file_utils import ModelOutput from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPooling, CausalLMOutputWithPast from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_git import GitConfig, GitVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'microsoft/git-base' _CONFIG_FOR_DOC = 'GitConfig' @dataclass class GitVisionModelOutput(ModelOutput): image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None 
attentions: Optional[Tuple[torch.FloatTensor, ...]] = None class GitEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length] if inputs_embeds is None: embeddings = self.word_embeddings(input_ids) else: embeddings = inputs_embeds if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class GitSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None, layer_idx=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. 
Please make sure to provide a `layer_idx` when creating this class.') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.image_patch_tokens = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1) if config.num_image_with_embedding is not None: self.image_patch_tokens *= config.num_image_with_embedding self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, pixel_values_present: Optional[bool]=False) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) cutoff = self.image_patch_tokens if pixel_values_present else 0 key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) if past_key_value is not None: (key_layer_past, value_layer_past) = past_key_value.update(key_layer[:, :, cutoff:, :], value_layer[:, :, cutoff:, :], self.layer_idx) key_layer = torch.cat([key_layer[:, :, :cutoff, :], key_layer_past], dim=2) value_layer = torch.cat([value_layer[:, :, :cutoff, :], value_layer_past], dim=2) query_layer = self.transpose_for_scores(mixed_query_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': (query_length, key_length) = (query_layer.shape[2], key_layer.shape[2]) if past_key_value is not None: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + 
relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs class GitSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states GIT_SELF_ATTENTION_CLASSES = {'eager': GitSelfAttention} class GitAttention(nn.Module): def __init__(self, config, position_embedding_type=None, layer_idx=None): super().__init__() self.self = GIT_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type, layer_idx=layer_idx) self.output = GitSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, pixel_values_present: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, past_key_value, output_attentions, pixel_values_present) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class GitIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class GitOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) 
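# --- editor's note (descriptive comments for GitSelfAttention above) ---
# transpose_for_scores() reshapes (batch, seq_len, hidden_size) into
# (batch, num_heads, seq_len, head_size) so attention scores are computed per head; for
# the base configuration (hidden_size 768, 12 heads) a (1, L, 768) tensor becomes
# (1, 12, L, 64). The sequence seen by the decoder is [image tokens, text tokens], and
# image_patch_tokens counts the image tokens ((image_size / patch_size) ** 2 + 1, i.e.
# 197 for 224-pixel images with 16-pixel patches). When a cache is used, `cutoff` keeps
# those image tokens out of it: only key/value states from position `cutoff` onwards are
# written via past_key_value.update(), and the cached text keys/values are concatenated
# back behind the freshly computed image-token keys/values on every decoding step.
# --- end of editor's note ---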
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class GitLayer(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = GitAttention(config, layer_idx=layer_idx) self.intermediate = GitIntermediate(config) self.output = GitOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, pixel_values_present: Optional[bool]=False) -> Tuple[torch.Tensor]: self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=past_key_value, pixel_values_present=pixel_values_present) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class GitEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([GitLayer(config, i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, pixel_values_present: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]: if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False use_legacy_cache = False if use_cache and (not isinstance(past_key_values, Cache)) and (not self.training): use_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) logger.warning_once('We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.45. 
Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/internal/generation_utils#transformers.Cache)') all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None next_decoder_cache = None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, past_key_values, output_attentions) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, past_key_values, output_attentions, pixel_values_present) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[-1] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attentions) class GitPreTrainedModel(PreTrainedModel): config_class = GitConfig base_model_prefix = 'git' supports_gradient_checkpointing = True _supports_cache_class = True _supports_quantized_cache = True def _init_weights(self, module): if isinstance(module, GitVisionEmbeddings): nn.init.normal_(module.class_embedding, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.patch_embedding.weight, std=self.config.initializer_range) nn.init.normal_(module.position_embedding.weight, std=self.config.initializer_range) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) GIT_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`GitConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GIT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`CLIPImageProcessor.__call__`] for details.\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance, see our\n [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" class GitVisionEmbeddings(nn.Module): def __init__(self, config: GitVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class GitVisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class GitVisionAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: (bsz, tgt_len, embed_dim) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 
2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped) class GitVisionEncoderLayer(nn.Module): def __init__(self, config: GitVisionConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = GitVisionAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = GitVisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class GitVisionEncoder(nn.Module): def __init__(self, config: GitVisionConfig): super().__init__() self.config = config self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) GIT_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class GitVisionTransformer(nn.Module): def __init__(self, config: GitVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = GitVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = GitVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) if not return_dict: return (last_hidden_state,) + encoder_outputs[1:] return BaseModelOutput(last_hidden_state=last_hidden_state, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('The vision model from CLIP, used in GIT, without any head or projection on top.', GIT_START_DOCSTRING) class GitVisionModel(GitPreTrainedModel): config_class = GitVisionConfig main_input_name = 'pixel_values' def __init__(self, config: GitVisionConfig): super().__init__(config) self.vision_model = GitVisionTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class GitProjection(nn.Module): def __init__(self, config: GitConfig): super().__init__() self.config = config self.visual_projection = nn.Sequential(nn.Linear(config.vision_config.hidden_size, config.hidden_size), nn.LayerNorm(config.hidden_size, eps=config.vision_config.layer_norm_eps)) def forward(self, embeddings: torch.Tensor) -> torch.Tensor: return self.visual_projection(embeddings) @add_start_docstrings('The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states without any specific head on top.', 
GIT_START_DOCSTRING) class GitModel(GitPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = GitEmbeddings(config) self.image_encoder = GitVisionModel(config.vision_config) self.encoder = GitEncoder(config) self.visual_projection = GitProjection(config) if config.num_image_with_embedding is not None: self.img_temperal_embedding = nn.ParameterList((nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size)) for _ in range(config.num_image_with_embedding))) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor: mask = torch.triu(torch.ones(size, size, device=device, dtype=dtype), diagonal=1) mask = mask.masked_fill(mask == 1, float('-inf')) return mask def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None): num_tgt = tgt.shape[1] num_memory = memory.shape[1] device = tgt.device dtype = tgt.dtype top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype) top_right = torch.full((num_memory, num_tgt + past_key_values_length), float('-inf'), device=tgt.device, dtype=dtype) bottom_left = torch.zeros((num_tgt, num_memory), dtype=dtype, device=tgt_mask.device) if past_key_values_length > 0: tgt_mask = torch.zeros((tgt_mask.shape[0], tgt_mask.shape[0] + past_key_values_length), dtype=dtype, device=tgt_mask.device) left = torch.cat((top_left, bottom_left), dim=0) right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0) full_attention_mask = torch.cat((left, right), dim=1)[None, :] if memory_key_padding_mask is None: memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device) if memory_key_padding_mask.dtype != torch.bool: raise ValueError('Memory key padding mask must be a boolean tensor.') zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype) zero_negative_infinity[memory_key_padding_mask] = float('-inf') full_attention_mask = full_attention_mask.expand((memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + past_key_values_length + num_tgt)) full_attention_mask = full_attention_mask.clone() origin_left = full_attention_mask[:, :, :num_memory] update = zero_negative_infinity[:, None, :] full_attention_mask[:, :, :num_memory] = origin_left + update full_attention_mask = full_attention_mask[:, None, :, :] return full_attention_mask @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not 
None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') seq_length = input_shape[1] past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] if not isinstance(past_key_values, Cache) else past_key_values.get_seq_length() head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) projected_visual_features = None if pixel_values is not None: if pixel_values.ndim == 4: visual_features = self.image_encoder(pixel_values).last_hidden_state elif pixel_values.ndim == 5: visual_features = [] for frame_idx in range(pixel_values.shape[1]): visual_features_frame = self.image_encoder(pixel_values[:, frame_idx, :, :]).last_hidden_state visual_features_frame += self.img_temperal_embedding[frame_idx] visual_features.append(visual_features_frame) visual_features = torch.cat(visual_features, dim=1) else: raise ValueError('pixel_values must be of rank 4 or 5') projected_visual_features = self.visual_projection(visual_features) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length) if projected_visual_features is None: projected_visual_features = torch.zeros((embedding_output.shape[0], 0, embedding_output.shape[2]), dtype=embedding_output.dtype, device=embedding_output.device) projected_visual_features = projected_visual_features.repeat(embedding_output.size(0) // projected_visual_features.size(0), 1, 1) hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1) tgt_mask = self._generate_future_mask(seq_length, embedding_output.dtype, embedding_output.device) combined_attention_mask = self.create_attention_mask(tgt=embedding_output, memory=projected_visual_features, tgt_mask=tgt_mask, past_key_values_length=past_key_values_length) if attention_mask is not None: expanded_attn_mask = _prepare_4d_attention_mask(attention_mask, embedding_output.dtype, tgt_len=input_shape[-1]).to(embedding_output.device) if past_key_values_length > 0: expanded_attn_mask = expanded_attn_mask[:, :, -past_key_values_length:, :] else: combined_attention_mask[:, :, -input_shape[1]:, -input_shape[1]:] += expanded_attn_mask encoder_outputs = self.encoder(hidden_states, attention_mask=combined_attention_mask, head_mask=head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, pixel_values_present=pixel_values is not None) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPast(last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('GIT Model with a 
`language modeling` head on top for autoregressive language modeling.', GIT_START_DOCSTRING) class GitForCausalLM(GitPreTrainedModel): _tied_weights_keys = ['output.weight'] def __init__(self, config): super().__init__(config) self.git = GitModel(config) self.output = nn.Linear(config.hidden_size, config.vocab_size) self.post_init() def get_output_embeddings(self): return self.output def set_output_embeddings(self, new_embeddings): self.output = new_embeddings @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: Optional[Union[Cache, List[torch.Tensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.git(input_ids, attention_mask=attention_mask, position_ids=position_ids, pixel_values=pixel_values, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.output(sequence_output) loss = None if labels is not None: num_image_tokens = self.git.encoder.layer[0].attention.self.image_patch_tokens shifted_logits = logits[:, num_image_tokens:-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(shifted_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs): if past_key_values is not None: past_length = past_key_values.get_seq_length() if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) return {'input_ids': input_ids, 'attention_mask': attention_mask, 'pixel_values': kwargs.get('pixel_values', None), 'past_key_values': past_key_values, 'use_cache': use_cache} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/git/processing_git.py """""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class GitProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] image_processor_class = 
'AutoImageProcessor' tokenizer_class = 'AutoTokenizer' def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor def __call__(self, text=None, images=None, return_tensors=None, **kwargs): (tokenizer_kwargs, image_processor_kwargs) = ({}, {}) if kwargs: tokenizer_kwargs = {k: v for (k, v) in kwargs.items() if k not in self.image_processor._valid_processor_keys} image_processor_kwargs = {k: v for (k, v) in kwargs.items() if k in self.image_processor._valid_processor_keys} if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.') if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **tokenizer_kwargs) if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors, **image_processor_kwargs) if text is not None and images is not None: encoding['pixel_values'] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): return ['input_ids', 'attention_mask', 'pixel_values'] # File: transformers-main/src/transformers/models/glpn/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_glpn': ['GLPNConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor'] _import_structure['image_processing_glpn'] = ['GLPNImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_glpn'] = ['GLPNForDepthEstimation', 'GLPNLayer', 'GLPNModel', 'GLPNPreTrainedModel'] if TYPE_CHECKING: from .configuration_glpn import GLPNConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_glpn import GLPNFeatureExtractor from .image_processing_glpn import GLPNImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_glpn import GLPNForDepthEstimation, GLPNLayer, GLPNModel, GLPNPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/glpn/configuration_glpn.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class GLPNConfig(PretrainedConfig): model_type = 'glpn' def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-06, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs): 
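# --- editor's note (minimal usage sketch for GLPNConfig, mirroring the defaults above) ---
#     from transformers import GLPNConfig, GLPNForDepthEstimation
#     config = GLPNConfig()                   # defaults from the signature above
#     model = GLPNForDepthEstimation(config)  # randomly initialised; use from_pretrained() for released weights
# The released checkpoints (e.g. 'vinvino02/glpn-kitti', referenced later in this module set)
# use hidden_sizes=[64, 128, 320, 512], depths=[3, 8, 27, 3] and decoder_hidden_size=64, as
# constructed in the conversion script below.
# --- end of editor's note ---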
super().__init__(**kwargs) self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.depths = depths self.sr_ratios = sr_ratios self.hidden_sizes = hidden_sizes self.patch_sizes = patch_sizes self.strides = strides self.mlp_ratios = mlp_ratios self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.drop_path_rate = drop_path_rate self.layer_norm_eps = layer_norm_eps self.decoder_hidden_size = decoder_hidden_size self.max_depth = max_depth self.head_in_index = head_in_index # File: transformers-main/src/transformers/models/glpn/convert_glpn_to_pytorch.py """""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def rename_keys(state_dict): new_state_dict = OrderedDict() for (key, value) in state_dict.items(): if key.startswith('module.encoder'): key = key.replace('module.encoder', 'glpn.encoder') if key.startswith('module.decoder'): key = key.replace('module.decoder', 'decoder.stages') if 'patch_embed' in key: idx = key[key.find('patch_embed') + len('patch_embed')] key = key.replace(f'patch_embed{idx}', f'patch_embeddings.{int(idx) - 1}') if 'norm' in key: key = key.replace('norm', 'layer_norm') if 'glpn.encoder.layer_norm' in key: idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')] key = key.replace(f'layer_norm{idx}', f'layer_norm.{int(idx) - 1}') if 'layer_norm1' in key: key = key.replace('layer_norm1', 'layer_norm_1') if 'layer_norm2' in key: key = key.replace('layer_norm2', 'layer_norm_2') if 'block' in key: idx = key[key.find('block') + len('block')] key = key.replace(f'block{idx}', f'block.{int(idx) - 1}') if 'attn.q' in key: key = key.replace('attn.q', 'attention.self.query') if 'attn.proj' in key: key = key.replace('attn.proj', 'attention.output.dense') if 'attn' in key: key = key.replace('attn', 'attention.self') if 'fc1' in key: key = key.replace('fc1', 'dense1') if 'fc2' in key: key = key.replace('fc2', 'dense2') if 'linear_pred' in key: key = key.replace('linear_pred', 'classifier') if 'linear_fuse' in key: key = key.replace('linear_fuse.conv', 'linear_fuse') key = key.replace('linear_fuse.bn', 'batch_norm') if 'linear_c' in key: idx = key[key.find('linear_c') + len('linear_c')] key = key.replace(f'linear_c{idx}', f'linear_c.{int(idx) - 1}') if 'bot_conv' in key: key = key.replace('bot_conv', '0.convolution') if 'skip_conv1' in key: key = key.replace('skip_conv1', '1.convolution') if 'skip_conv2' in key: key = key.replace('skip_conv2', '2.convolution') if 'fusion1' in key: key = key.replace('fusion1', '1.fusion') if 'fusion2' in key: key = key.replace('fusion2', '2.fusion') if 'fusion3' in key: key = key.replace('fusion3', '3.fusion') if 'fusion' in key and 'conv' in key: key = key.replace('conv', 'convolutional_layer') if key.startswith('module.last_layer_depth'): key = key.replace('module.last_layer_depth', 'head.head') new_state_dict[key] = value return new_state_dict def read_in_k_v(state_dict, config): for i in range(config.num_encoder_blocks): for j in range(config.depths[i]): kv_weight = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight') kv_bias = 
state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias') state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[:config.hidden_sizes[i], :] state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[:config.hidden_sizes[i]] state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[config.hidden_sizes[i]:, :] state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i]:] def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) return image @torch.no_grad() def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None): config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3]) image_processor = GLPNImageProcessor() image = prepare_img() pixel_values = image_processor(images=image, return_tensors='pt').pixel_values logger.info('Converting model...') state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu')) state_dict = rename_keys(state_dict) read_in_k_v(state_dict, config) model = GLPNForDepthEstimation(config) model.load_state_dict(state_dict) model.eval() outputs = model(pixel_values) predicted_depth = outputs.predicted_depth if model_name is not None: if 'nyu' in model_name: expected_slice = torch.tensor([[4.4147, 4.0873, 4.0673], [3.789, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]) elif 'kitti' in model_name: expected_slice = torch.tensor([[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]) else: raise ValueError(f'Unknown model name: {model_name}') expected_shape = torch.Size([1, 480, 640]) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=0.0001) print('Looks ok!') if push_to_hub: logger.info('Pushing model and image processor to the hub...') model.push_to_hub(repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True) image_processor.push_to_hub(repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.') parser.add_argument('--model_name', default='glpn-kitti', type=str, help="Name of the model in case you're pushing to the hub.") args = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name) # File: transformers-main/src/transformers/models/glpn/feature_extraction_glpn.py """""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor logger = logging.get_logger(__name__) class GLPNFeatureExtractor(GLPNImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. 
Please use GLPNImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs) # File: transformers-main/src/transformers/models/glpn/image_processing_glpn.py """""" from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import ChannelDimension, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, logging logger = logging.get_logger(__name__) class GLPNImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size_divisor: int=32, resample=PILImageResampling.BILINEAR, do_rescale: bool=True, **kwargs) -> None: self.do_resize = do_resize self.do_rescale = do_rescale self.size_divisor = size_divisor self.resample = resample super().__init__(**kwargs) def resize(self, image: np.ndarray, size_divisor: int, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: (height, width) = get_image_size(image, channel_dim=input_data_format) new_h = height // size_divisor * size_divisor new_w = width // size_divisor * size_divisor image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) return image @filter_out_non_signature_kwargs() def preprocess(self, images: Union['PIL.Image.Image', TensorType, List['PIL.Image.Image'], List[TensorType]], do_resize: Optional[bool]=None, size_divisor: Optional[int]=None, resample=None, do_rescale: Optional[bool]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature: do_resize = do_resize if do_resize is not None else self.do_resize do_rescale = do_rescale if do_rescale is not None else self.do_rescale size_divisor = size_divisor if size_divisor is not None else self.size_divisor resample = resample if resample is not None else self.resample images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_resize=do_resize, size=size_divisor, resample=resample) images = [to_numpy_array(img) for img in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image, scale=1 / 255, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors) # File: transformers-main/src/transformers/models/glpn/modeling_glpn.py """""" import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_glpn import GLPNConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'GLPNConfig' _CHECKPOINT_FOR_DOC = 'vinvino02/glpn-kitti' _EXPECTED_OUTPUT_SHAPE = [1, 512, 15, 20] def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor: if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() output = input.div(keep_prob) * random_tensor return output class GLPNDropPath(nn.Module): def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class GLPNOverlapPatchEmbeddings(nn.Module): def __init__(self, patch_size, stride, num_channels, hidden_size): super().__init__() self.proj = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=patch_size // 2) self.layer_norm = nn.LayerNorm(hidden_size) def forward(self, pixel_values): embeddings = self.proj(pixel_values) (_, _, height, width) = embeddings.shape embeddings = embeddings.flatten(2).transpose(1, 2) embeddings = self.layer_norm(embeddings) return (embeddings, height, width) class GLPNEfficientSelfAttention(nn.Module): def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio): super().__init__() self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads if self.hidden_size % self.num_attention_heads != 0: raise ValueError(f'The hidden size ({self.hidden_size}) is not a multiple of the number of attention heads ({self.num_attention_heads})') self.attention_head_size = int(self.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(self.hidden_size, self.all_head_size) self.key = nn.Linear(self.hidden_size, self.all_head_size) self.value = nn.Linear(self.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.sr_ratio = 
sequence_reduction_ratio if sequence_reduction_ratio > 1: self.sr = nn.Conv2d(hidden_size, hidden_size, kernel_size=sequence_reduction_ratio, stride=sequence_reduction_ratio) self.layer_norm = nn.LayerNorm(hidden_size) def transpose_for_scores(self, hidden_states): new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size) hidden_states = hidden_states.view(new_shape) return hidden_states.permute(0, 2, 1, 3) def forward(self, hidden_states, height, width, output_attentions=False): query_layer = self.transpose_for_scores(self.query(hidden_states)) if self.sr_ratio > 1: (batch_size, seq_len, num_channels) = hidden_states.shape hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) hidden_states = self.sr(hidden_states) hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1) hidden_states = self.layer_norm(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class GLPNSelfOutput(nn.Module): def __init__(self, config, hidden_size): super().__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class GLPNAttention(nn.Module): def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio): super().__init__() self.self = GLPNEfficientSelfAttention(config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio) self.output = GLPNSelfOutput(config, hidden_size=hidden_size) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, height, width, output_attentions=False): self_outputs = self.self(hidden_states, height, width, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class GLPNDWConv(nn.Module): def __init__(self, dim=768): super().__init__() self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim) def forward(self, hidden_states, height, 
width): (batch_size, seq_len, num_channels) = hidden_states.shape hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width) hidden_states = self.dwconv(hidden_states) hidden_states = hidden_states.flatten(2).transpose(1, 2) return hidden_states class GLPNMixFFN(nn.Module): def __init__(self, config, in_features, hidden_features=None, out_features=None): super().__init__() out_features = out_features or in_features self.dense1 = nn.Linear(in_features, hidden_features) self.dwconv = GLPNDWConv(hidden_features) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.dense2 = nn.Linear(hidden_features, out_features) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, height, width): hidden_states = self.dense1(hidden_states) hidden_states = self.dwconv(hidden_states, height, width) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.dense2(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class GLPNLayer(nn.Module): def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio): super().__init__() self.layer_norm_1 = nn.LayerNorm(hidden_size) self.attention = GLPNAttention(config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio) self.drop_path = GLPNDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.layer_norm_2 = nn.LayerNorm(hidden_size) mlp_hidden_size = int(hidden_size * mlp_ratio) self.mlp = GLPNMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size) def forward(self, hidden_states, height, width, output_attentions=False): self_attention_outputs = self.attention(self.layer_norm_1(hidden_states), height, width, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] attention_output = self.drop_path(attention_output) hidden_states = attention_output + hidden_states mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width) mlp_output = self.drop_path(mlp_output) layer_output = mlp_output + hidden_states outputs = (layer_output,) + outputs return outputs class GLPNEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] embeddings = [] for i in range(config.num_encoder_blocks): embeddings.append(GLPNOverlapPatchEmbeddings(patch_size=config.patch_sizes[i], stride=config.strides[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i])) self.patch_embeddings = nn.ModuleList(embeddings) blocks = [] cur = 0 for i in range(config.num_encoder_blocks): layers = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i]): layers.append(GLPNLayer(config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=dpr[cur + j], sequence_reduction_ratio=config.sr_ratios[i], mlp_ratio=config.mlp_ratios[i])) blocks.append(nn.ModuleList(layers)) self.block = nn.ModuleList(blocks) self.layer_norm = nn.ModuleList([nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)]) def forward(self, pixel_values, output_attentions=False, output_hidden_states=False, return_dict=True): all_hidden_states 
= () if output_hidden_states else None all_self_attentions = () if output_attentions else None batch_size = pixel_values.shape[0] hidden_states = pixel_values for (idx, x) in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)): (embedding_layer, block_layer, norm_layer) = x (hidden_states, height, width) = embedding_layer(hidden_states) for (i, blk) in enumerate(block_layer): layer_outputs = blk(hidden_states, height, width, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = norm_layer(hidden_states) hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous() if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class GLPNPreTrainedModel(PreTrainedModel): config_class = GLPNConfig base_model_prefix = 'glpn' main_input_name = 'pixel_values' _no_split_modules = [] def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) GLPN_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`GLPNConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GLPN_INPUTS_DOCSTRING = '\n\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`GLPNImageProcessor.__call__`] for details.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare GLPN encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.', GLPN_START_DOCSTRING) class GLPNModel(GLPNPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.encoder = GLPNEncoder(config) self.post_init() def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format('(batch_size, sequence_length)')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class GLPNSelectiveFeatureFusion(nn.Module): def __init__(self, in_channel=64): super().__init__() self.convolutional_layer1 = nn.Sequential(nn.Conv2d(in_channels=int(in_channel * 2), out_channels=in_channel, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(in_channel), nn.ReLU()) self.convolutional_layer2 = nn.Sequential(nn.Conv2d(in_channels=in_channel, out_channels=int(in_channel / 2), kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(int(in_channel / 2)), nn.ReLU()) self.convolutional_layer3 = nn.Conv2d(in_channels=int(in_channel / 2), out_channels=2, kernel_size=3, stride=1, padding=1) self.sigmoid = nn.Sigmoid() def forward(self, local_features, global_features): features = torch.cat((local_features, global_features), dim=1) features = self.convolutional_layer1(features) features = self.convolutional_layer2(features) features = self.convolutional_layer3(features) attn = self.sigmoid(features) hybrid_features = local_features * attn[:, 0, :, :].unsqueeze(1) + global_features * attn[:, 1, :, :].unsqueeze(1) return hybrid_features class GLPNDecoderStage(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() should_skip = in_channels == out_channels self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1) if not should_skip else nn.Identity() self.fusion = GLPNSelectiveFeatureFusion(out_channels) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, hidden_state, residual=None): hidden_state = self.convolution(hidden_state) if residual is not None: hidden_state = self.fusion(hidden_state, residual) hidden_state = self.upsample(hidden_state) return hidden_state hidden_state = self.upsample(hidden_state) return 
hidden_state class GLPNDecoder(nn.Module): def __init__(self, config): super().__init__() reserved_hidden_sizes = config.hidden_sizes[::-1] out_channels = config.decoder_hidden_size self.stages = nn.ModuleList([GLPNDecoderStage(hidden_size, out_channels) for hidden_size in reserved_hidden_sizes]) self.stages[0].fusion = None self.final_upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, hidden_states: List[torch.Tensor]) -> List[torch.Tensor]: stage_hidden_states = [] stage_hidden_state = None for (hidden_state, stage) in zip(hidden_states[::-1], self.stages): stage_hidden_state = stage(hidden_state, stage_hidden_state) stage_hidden_states.append(stage_hidden_state) stage_hidden_states[-1] = self.final_upsample(stage_hidden_state) return stage_hidden_states class SiLogLoss(nn.Module): def __init__(self, lambd=0.5): super().__init__() self.lambd = lambd def forward(self, pred, target): valid_mask = (target > 0).detach() diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask]) loss = torch.sqrt(torch.pow(diff_log, 2).mean() - self.lambd * torch.pow(diff_log.mean(), 2)) return loss class GLPNDepthEstimationHead(nn.Module): def __init__(self, config): super().__init__() self.config = config channels = config.decoder_hidden_size self.head = nn.Sequential(nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=False), nn.Conv2d(channels, 1, kernel_size=3, stride=1, padding=1)) def forward(self, hidden_states: List[torch.Tensor]) -> torch.Tensor: hidden_states = hidden_states[self.config.head_in_index] hidden_states = self.head(hidden_states) predicted_depth = torch.sigmoid(hidden_states) * self.config.max_depth predicted_depth = predicted_depth.squeeze(dim=1) return predicted_depth @add_start_docstrings('GLPN Model transformer with a lightweight depth estimation head on top e.g. 
for KITTI, NYUv2.', GLPN_START_DOCSTRING) class GLPNForDepthEstimation(GLPNPreTrainedModel): def __init__(self, config): super().__init__(config) self.glpn = GLPNModel(config) self.decoder = GLPNDecoder(config) self.head = GLPNDepthEstimationHead(config) self.post_init() @add_start_docstrings_to_model_forward(GLPN_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=DepthEstimatorOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states outputs = self.glpn(pixel_values, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict) hidden_states = outputs.hidden_states if return_dict else outputs[1] out = self.decoder(hidden_states) predicted_depth = self.head(out) loss = None if labels is not None: loss_fct = SiLogLoss() loss = loss_fct(predicted_depth, labels) if not return_dict: if output_hidden_states: output = (predicted_depth,) + outputs[1:] else: output = (predicted_depth,) + outputs[2:] return (loss,) + output if loss is not None else output return DepthEstimatorOutput(loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/gpt2/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_keras_nlp_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available _import_structure = {'configuration_gpt2': ['GPT2Config', 'GPT2OnnxConfig'], 'tokenization_gpt2': ['GPT2Tokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_gpt2_fast'] = ['GPT2TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_gpt2'] = ['GPT2DoubleHeadsModel', 'GPT2ForQuestionAnswering', 'GPT2ForSequenceClassification', 'GPT2ForTokenClassification', 'GPT2LMHeadModel', 'GPT2Model', 'GPT2PreTrainedModel', 'load_tf_weights_in_gpt2'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_gpt2'] = ['TFGPT2DoubleHeadsModel', 'TFGPT2ForSequenceClassification', 'TFGPT2LMHeadModel', 'TFGPT2MainLayer', 'TFGPT2Model', 'TFGPT2PreTrainedModel'] try: if not is_keras_nlp_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_gpt2_tf'] = ['TFGPT2Tokenizer'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_gpt2'] = ['FlaxGPT2LMHeadModel', 'FlaxGPT2Model', 'FlaxGPT2PreTrainedModel'] if TYPE_CHECKING: from .configuration_gpt2 import GPT2Config, GPT2OnnxConfig from .tokenization_gpt2 import GPT2Tokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() 
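# Usage sketch for the GLPN depth-estimation stack defined above (GLPNImageProcessor + GLPNForDepthEstimation).
# This is a minimal, illustrative example: the checkpoint name is the one referenced by _CHECKPOINT_FOR_DOC
# ('vinvino02/glpn-kitti'), and 'room.jpg' is a hypothetical local image path, not something taken from this file.
import torch
from PIL import Image
from transformers import GLPNForDepthEstimation, GLPNImageProcessor

image_processor = GLPNImageProcessor.from_pretrained('vinvino02/glpn-kitti')
model = GLPNForDepthEstimation.from_pretrained('vinvino02/glpn-kitti')

image = Image.open('room.jpg')  # hypothetical input image
inputs = image_processor(images=image, return_tensors='pt')  # resizes to a multiple of size_divisor and rescales by 1/255

with torch.no_grad():
    outputs = model(**inputs)

# GLPNDepthEstimationHead squeezes the channel dimension, so predicted_depth has shape (batch_size, height, width).
predicted_depth = outputs.predicted_depth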
except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt2_fast import GPT2TokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt2 import GPT2DoubleHeadsModel, GPT2ForQuestionAnswering, GPT2ForSequenceClassification, GPT2ForTokenClassification, GPT2LMHeadModel, GPT2Model, GPT2PreTrainedModel, load_tf_weights_in_gpt2 try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_gpt2 import TFGPT2DoubleHeadsModel, TFGPT2ForSequenceClassification, TFGPT2LMHeadModel, TFGPT2MainLayer, TFGPT2Model, TFGPT2PreTrainedModel try: if not is_keras_nlp_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt2_tf import TFGPT2Tokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt2 import FlaxGPT2LMHeadModel, FlaxGPT2Model, FlaxGPT2PreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/gpt2/configuration_gpt2.py """""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging logger = logging.get_logger(__name__) class GPT2Config(PretrainedConfig): model_type = 'gpt2' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'} def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function='gelu_new', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs): self.vocab_size = vocab_size self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.n_inner = n_inner self.activation_function = activation_function self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels self.scale_attn_weights = scale_attn_weights self.use_cache = use_cache self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx self.reorder_and_upcast_attn = reorder_and_upcast_attn self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) class GPT2OnnxConfig(OnnxConfigWithPast): def __init__(self, config: PretrainedConfig, task: str='default', patching_specs: List[PatchingSpec]=None, use_past: bool=False): 
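# A minimal, illustrative sketch of the GPT2Config defined above. The keyword arguments are the documented
# ones (n_layer, n_head, n_embd, n_positions, ...), but the small values chosen here are examples only;
# building GPT2Model from the config assumes PyTorch is installed.
from transformers import GPT2Config, GPT2Model

config = GPT2Config(n_layer=2, n_head=4, n_embd=128, n_positions=256)
model = GPT2Model(config)  # randomly initialized weights; only the configuration is used here
print(config.hidden_size)  # 128, since attribute_map aliases hidden_size to n_embd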
super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past) if not getattr(self._config, 'pad_token_id', None): self._config.pad_token_id = 0 @property def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}}) if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'} else: common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'} return common_inputs @property def num_layers(self) -> int: return self._config.n_layer @property def num_attention_heads(self) -> int: return self._config.n_head def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']}) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, seqlen) = common_inputs['input_ids'].shape past_key_values_length = seqlen + 2 past_shape = (batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads) ordered_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)] ordered_inputs['attention_mask'] = common_inputs['attention_mask'] if self.use_past: mask_dtype = ordered_inputs['attention_mask'].dtype ordered_inputs['attention_mask'] = torch.cat([ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1) return ordered_inputs @property def default_onnx_opset(self) -> int: return 13 # File: transformers-main/src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py """""" import argparse import torch from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2 from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path): if gpt2_config_file == '': config = GPT2Config() else: config = GPT2Config.from_json_file(gpt2_config_file) model = GPT2Model(config) load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path) pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}') torch.save(model.state_dict(), pytorch_weights_dump_path) print(f'Save configuration file to {pytorch_config_dump_path}') with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f: f.write(config.to_json_string()) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') parser.add_argument('--gpt2_config_file', default='', type=str, help='An optional config json file corresponding to the pre-trained OpenAI model. 
\nThis specifies the model architecture.') args = parser.parse_args() convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path) # File: transformers-main/src/transformers/models/gpt2/modeling_flax_gpt2.py from typing import Any, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_gpt2 import GPT2Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'openai-community/gpt2' _CONFIG_FOR_DOC = 'GPT2Config' GPT2_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a Flax Linen\n [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a\n regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`GPT2Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' GPT2_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):\n Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast\n auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class FlaxConv1D(nn.Module): features: int use_bias: bool = True dtype: Any = jnp.float32 precision: Any = None @nn.compact def __call__(self, inputs): inputs = jnp.asarray(inputs, self.dtype) kernel = self.param('kernel', jax.nn.initializers.normal(stddev=0.02), (self.features, inputs.shape[-1])) kernel = jnp.asarray(kernel.transpose(), self.dtype) y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())), precision=self.precision) if self.use_bias: bias = self.param('bias', jax.nn.initializers.zeros, (self.features,)) bias = jnp.asarray(bias, self.dtype) y = y + bias return y class FlaxGPT2Attention(nn.Module): config: GPT2Config dtype: jnp.dtype = jnp.float32 causal: bool = True is_cross_attention: bool = False def setup(self): config = self.config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.is_cross_attention: self.c_attn = FlaxConv1D(2 * self.embed_dim, dtype=self.dtype) self.q_attn = FlaxConv1D(self.embed_dim, dtype=self.dtype) else: self.c_attn = FlaxConv1D(3 * self.embed_dim, dtype=self.dtype) self.c_proj = FlaxConv1D(self.embed_dim, dtype=self.dtype) self.resid_dropout = nn.Dropout(rate=config.resid_pdrop) if self.causal: self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype='bool'), dtype='bool') def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') cached_key = self.variable('cache', 'cached_key', jnp.zeros, key.shape, key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape, value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, dtype=jnp.int32)) if is_initialized: (*batch_dims, 
max_length, num_heads, depth_per_head) = cached_key.value.shape cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors pad_mask = jnp.broadcast_to(jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) def __call__(self, hidden_states, key_value_states: Optional[jnp.ndarray]=None, attention_mask=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False): is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] if not is_cross_attention: qkv_out = self.c_attn(hidden_states) (query, key, value) = jnp.split(qkv_out, 3, axis=2) else: q_out = self.q_attn(hidden_states) (query,) = jnp.split(q_out, 1, axis=2) kv_out = self.c_attn(key_value_states) (key, value) = jnp.split(kv_out, 2, axis=2) query = self._split_heads(query) key = self._split_heads(key) value = self._split_heads(value) (query_length, key_length) = (query.shape[1], key.shape[1]) if self.causal: if self.has_variable('cache', 'cached_key'): mask_shift = self.variables['cache']['cache_index'] max_decoder_length = self.variables['cache']['cached_key'].shape[1] causal_mask = lax.dynamic_slice(self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) dropout_rng = None if not deterministic and self.config.attn_pdrop > 0.0: dropout_rng = self.make_rng('dropout') if self.causal and (self.has_variable('cache', 'cached_key') or init_cache): (key, value, attention_mask) = self._concatenate_to_cache(key, value, query, attention_mask) if attention_mask is not None: attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) else: attention_bias = None attn_weights = dot_product_attention_weights(query, key, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attn_pdrop, deterministic=deterministic, dtype=self.dtype, precision=None) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value) attn_output = self._merge_heads(attn_output) attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output, deterministic=deterministic) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxGPT2MLP(nn.Module): config: GPT2Config intermediate_size: int dtype: jnp.dtype = jnp.float32 def setup(self): embed_dim = self.config.hidden_size self.c_fc = FlaxConv1D(self.intermediate_size, dtype=self.dtype) self.c_proj = FlaxConv1D(embed_dim, dtype=self.dtype) self.act = 
ACT2FN[self.config.activation_function] self.dropout = nn.Dropout(rate=self.config.resid_pdrop) def __call__(self, hidden_states, deterministic: bool=True): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxGPT2Block(nn.Module): config: GPT2Config dtype: jnp.dtype = jnp.float32 def setup(self): hidden_size = self.config.hidden_size inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.attn = FlaxGPT2Attention(self.config, dtype=self.dtype) self.ln_2 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = FlaxGPT2Attention(config=self.config, dtype=self.dtype, causal=False, is_cross_attention=True) self.ln_cross_attn = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.mlp = FlaxGPT2MLP(self.config, inner_dim, dtype=self.dtype) def __call__(self, hidden_states, attention_mask=None, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False): residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn(hidden_states, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions) attn_output = attn_outputs[0] outputs = attn_outputs[1:] hidden_states = attn_output + residual if encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_outputs = self.crossattention(hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, deterministic=deterministic, output_attentions=output_attentions) attn_output = cross_attn_outputs[0] hidden_states = residual + attn_output outputs = outputs + cross_attn_outputs[1:] residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic) hidden_states = residual + feed_forward_hidden_states outputs = (hidden_states,) + outputs return outputs class FlaxGPT2PreTrainedModel(FlaxPreTrainedModel): config_class = GPT2Config base_model_prefix = 'transformer' module_class: nn.Module = None def __init__(self, config: GPT2Config, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,)) 
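# A minimal usage sketch of the Flax GPT-2 stack defined in this file. It assumes jax/flax are installed and
# uses the checkpoint referenced by _CHECKPOINT_FOR_DOC ('openai-community/gpt2'); the prompt string is illustrative.
from transformers import AutoTokenizer, FlaxGPT2Model

tokenizer = AutoTokenizer.from_pretrained('openai-community/gpt2')
model = FlaxGPT2Model.from_pretrained('openai-community/gpt2')

inputs = tokenizer('Hello, my dog is cute', return_tensors='np')  # Flax models accept numpy arrays
outputs = model(**inputs)
last_hidden_state = outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)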
encoder_attention_mask = attention_mask module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, encoder_hidden_states, encoder_attention_mask, return_dict=False) else: module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False) random_params = module_init_outputs['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): input_ids = jnp.ones((batch_size, max_length)) attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True) return unfreeze(init_variables['cache']) @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) def __call__(self, input_ids, attention_mask=None, position_ids=None, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, params: dict=None, past_key_values: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if encoder_hidden_states is not None and encoder_attention_mask is None: (batch_size, sequence_length) = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) (batch_size, sequence_length) = input_ids.shape if position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `position_ids` when passing `past_key_values`.') position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False outputs = self.module.apply(inputs, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), jnp.array(position_ids, dtype='i4'), encoder_hidden_states, encoder_attention_mask, not train, False, output_attentions, output_hidden_states, return_dict, rngs=rngs, mutable=mutable) if past_key_values is not None and return_dict: (outputs, past_key_values) = outputs outputs['past_key_values'] = unfreeze(past_key_values['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past_key_values) = outputs outputs = outputs[:1] + (unfreeze(past_key_values['cache']),) + outputs[1:] return outputs class FlaxGPT2BlockCollection(nn.Module): config: GPT2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.blocks = [FlaxGPT2Block(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] def __call__(self, hidden_states, attention_mask=None, encoder_hidden_states: 
Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None for block in self.blocks: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = block(hidden_states, attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) return outputs class FlaxGPT2Module(nn.Module): config: GPT2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.embed_dim = self.config.hidden_size self.wte = nn.Embed(self.config.vocab_size, self.embed_dim, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype) self.wpe = nn.Embed(self.config.max_position_embeddings, self.embed_dim, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.embd_pdrop) self.h = FlaxGPT2BlockCollection(self.config, dtype=self.dtype) self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) def __call__(self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, deterministic=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): input_embeds = self.wte(input_ids.astype('i4')) position_embeds = self.wpe(position_ids.astype('i4')) hidden_states = input_embeds + position_embeds hidden_states = self.dropout(hidden_states, deterministic=deterministic) outputs = self.h(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = outputs[1] + (hidden_states,) outputs = (hidden_states, all_hidden_states) + outputs[2:] else: outputs = (hidden_states,) + outputs[1:] if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, hidden_states=outputs[1], attentions=outputs[2], cross_attentions=outputs[3]) @add_start_docstrings('The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.', GPT2_START_DOCSTRING) class FlaxGPT2Model(FlaxGPT2PreTrainedModel): module_class = FlaxGPT2Module append_call_sample_docstring(FlaxGPT2Model, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPastAndCrossAttentions, _CONFIG_FOR_DOC) class FlaxGPT2LMHeadModule(nn.Module): config: GPT2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.transformer = FlaxGPT2Module(self.config, dtype=self.dtype) self.lm_head = nn.Dense(self.config.vocab_size, use_bias=False, dtype=self.dtype, 
kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) def __call__(self, input_ids, attention_mask, position_ids, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.transformer(input_ids, attention_mask, position_ids, encoder_hidden_states, encoder_attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_kernel = self.transformer.variables['params']['wte']['embedding'].T lm_logits = self.lm_head.apply({'params': {'kernel': shared_kernel}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) @add_start_docstrings('\n The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', GPT2_START_DOCSTRING) class FlaxGPT2LMHeadModel(FlaxGPT2PreTrainedModel): module_class = FlaxGPT2LMHeadModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array]=None): (batch_size, seq_length) = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask.astype('i4'), (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'attention_mask': extended_attention_mask, 'position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['position_ids'] = model_kwargs['position_ids'][:, -1:] + 1 return model_kwargs append_call_sample_docstring(FlaxGPT2LMHeadModel, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC) # File: transformers-main/src/transformers/models/gpt2/modeling_gpt2.py """""" import math import os import warnings from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput from ...modeling_utils import PreTrainedModel, SequenceSummary from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, get_torch_version, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, 
replace_return_docstrings from ...utils.model_parallel_utils import assert_device_map, get_device_map from .configuration_gpt2 import GPT2Config if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'openai-community/gpt2' _CONFIG_FOR_DOC = 'GPT2Config' def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path): try: import re import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(gpt2_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: logger.info(f'Loading TF weight {name} with shape {shape}') array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array.squeeze()) for (name, array) in zip(names, arrays): name = name[6:] name = name.split('/') pointer = model for m_name in name: if re.fullmatch('[A-Za-z]+\\d+', m_name): scope_names = re.split('(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'w' or scope_names[0] == 'g': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'b': pointer = getattr(pointer, 'bias') elif scope_names[0] == 'wpe' or scope_names[0] == 'wte': pointer = getattr(pointer, scope_names[0]) pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, scope_names[0]) if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] try: if pointer.shape != array.shape: raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched') except ValueError as e: e.args += (pointer.shape, array.shape) raise logger.info(f'Initialize PyTorch weight {name}') pointer.data = torch.from_numpy(array) return model class GPT2Attention(nn.Module): def __init__(self, config, is_cross_attention=False, layer_idx=None): super().__init__() self.config = config max_positions = config.max_position_embeddings self.register_buffer('bias', torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(1, 1, max_positions, max_positions), persistent=False) self.register_buffer('masked_bias', torch.tensor(-10000.0), persistent=False) self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.split_size = self.embed_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale_attn_weights = config.scale_attn_weights self.is_cross_attention = is_cross_attention self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx self.layer_idx = layer_idx self.reorder_and_upcast_attn = config.reorder_and_upcast_attn if self.is_cross_attention: self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) self.q_attn = Conv1D(self.embed_dim, self.embed_dim) else: self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) self.c_proj = Conv1D(self.embed_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) self.is_causal = True self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.num_heads, 
self.head_dim, self.pruned_heads) index_attn = torch.cat([index, index + self.split_size, index + 2 * self.split_size]) self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) self.split_size = self.split_size // self.num_heads * (self.num_heads - len(heads)) self.num_heads = self.num_heads - len(heads) self.pruned_heads = self.pruned_heads.union(heads) def _attn(self, query, key, value, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full([], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device) if self.scale_attn_by_inverse_layer_idx: attn_weights = attn_weights / float(self.layer_idx + 1) if not self.is_cross_attention: (query_length, key_length) = (query.size(-2), key.size(-2)) causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min mask_value = torch.full([], mask_value, dtype=attn_weights.dtype, device=attn_weights.device) attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value) if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return (attn_output, attn_weights) def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None): (bsz, num_heads, q_seq_len, dk) = query.size() (_, _, k_seq_len, _) = key.size() attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device) scale_factor = 1.0 if self.scale_attn_weights: scale_factor /= float(value.size(-1)) ** 0.5 if self.scale_attn_by_inverse_layer_idx: scale_factor /= float(self.layer_idx + 1) with torch.amp.autocast(query.device.type, enabled=False): (q, k) = (query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)) attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) if not self.is_cross_attention: (query_length, key_length) = (query.size(-2), key.size(-2)) causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device) attn_weights = torch.where(causal_mask, attn_weights, mask_value) if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) if attn_weights.dtype != torch.float32: raise RuntimeError('Error with upcasting, attn_weights does not have dtype torch.float32') attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return (attn_output, attn_weights) def _split_heads(self, tensor, num_heads, attn_head_size): new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) return tensor.permute(0, 2, 1, 3) def _merge_heads(self, tensor, num_heads, attn_head_size): tensor = tensor.permute(0, 2, 1, 3).contiguous() new_shape = 
tensor.size()[:-2] + (num_heads * attn_head_size,) return tensor.view(new_shape) def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: if encoder_hidden_states is not None: if not hasattr(self, 'q_attn'): raise ValueError('If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`.') query = self.q_attn(hidden_states) (key, value) = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) attention_mask = encoder_attention_mask else: (query, key, value) = self.c_attn(hidden_states).split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if layer_past is not None: (past_key, past_value) = layer_past key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: present = (key, value) else: present = None if self.reorder_and_upcast_attn: (attn_output, attn_weights) = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask) else: (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs class GPT2FlashAttention2(GPT2Attention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: (bsz, _, _) = hidden_states.size() if encoder_hidden_states is not None: if not hasattr(self, 'q_attn'): raise ValueError('If class is used as cross attention, the weights `q_attn` have to be defined. 
Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`.') query = self.q_attn(hidden_states) (key, value) = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) attention_mask = encoder_attention_mask else: (query, key, value) = self.c_attn(hidden_states).split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if layer_past is not None: past_key = layer_past[0] past_value = layer_past[1] key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) present = None if use_cache is True: present = (key, value) query_length = query.shape[2] tgt_len = key.shape[2] query = query.transpose(1, 2).view(bsz, query_length, self.num_heads, self.head_dim) key = key.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) value = value.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) attn_dropout = self.attn_dropout.p if self.training else 0.0 if query.dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.c_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query = query.to(target_dtype) key = key.to(target_dtype) value = value.to(target_dtype) attn_output = _flash_attention_forward(query, key, value, attention_mask, query_length, dropout=attn_dropout, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_weights_reshaped = attn_output.reshape(bsz, query_length, self.num_heads * self.head_dim) attn_output = self.c_proj(attn_weights_reshaped) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights_reshaped,) return outputs class GPT2SdpaAttention(GPT2Attention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse('2.2.0') def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: if output_attentions or head_mask is not None: logger.warning_once('`GPT2SdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states=hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions) (bsz, q_len, _) = hidden_states.size() is_cross_attention = encoder_hidden_states is not None if is_cross_attention: if not hasattr(self, 'q_attn'): raise ValueError('If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `GPT2SdpaAttention(..., is_cross_attention=True)`.') query = self.q_attn(hidden_states) (key, value) = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) attention_mask = encoder_attention_mask else: (query, key, value) = self.c_attn(hidden_states).split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if layer_past is not None: past_key = layer_past[0] past_value = layer_past[1] key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) present = None if use_cache is True: present = (key, value) if self.require_contiguous_qkv and query.device.type == 'cuda' and (attention_mask is not None): query = query.contiguous() key = key.contiguous() value = value.contiguous() is_causal = True if attention_mask is None and q_len > 1 and (not is_cross_attention) else False attn_output = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=self.attn_dropout.p if self.training else 0.0, is_causal=is_causal) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, self.embed_dim) attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) return (attn_output, present, None) class GPT2MLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = Conv1D(intermediate_size, embed_dim) self.c_proj = Conv1D(embed_dim, intermediate_size) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states GPT2_ATTENTION_CLASSES = {'eager': GPT2Attention, 'flash_attention_2': GPT2FlashAttention2, 'sdpa': GPT2SdpaAttention} class GPT2Block(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() hidden_size = config.hidden_size inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size attention_class = GPT2_ATTENTION_CLASSES[config._attn_implementation] self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = attention_class(config=config, layer_idx=layer_idx) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: self.crossattention = attention_class(config=config, is_cross_attention=True, layer_idx=layer_idx) self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPT2MLP(inner_dim, config) def forward(self, hidden_states: 
Optional[Tuple[torch.FloatTensor]], layer_past: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions) attn_output = attn_outputs[0] outputs = attn_outputs[1:] hidden_states = attn_output + residual if encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_outputs = self.crossattention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions) attn_output = cross_attn_outputs[0] hidden_states = residual + attn_output outputs = outputs + cross_attn_outputs[2:] residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = residual + feed_forward_hidden_states if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs class GPT2PreTrainedModel(PreTrainedModel): config_class = GPT2Config load_tf_weights = load_tf_weights_in_gpt2 base_model_prefix = 'transformer' is_parallelizable = True supports_gradient_checkpointing = True _no_split_modules = ['GPT2Block'] _skip_keys_device_placement = 'past_key_values' _supports_flash_attn_2 = True _supports_sdpa = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): if isinstance(module, (nn.Linear, Conv1D)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) for (name, p) in module.named_parameters(): if name == 'c_proj.weight': p.data.normal_(mean=0.0, std=self.config.initializer_range / math.sqrt(2 * self.config.n_layer)) @dataclass class GPT2DoubleHeadsModelOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None mc_loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mc_logits: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None GPT2_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`GPT2Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GPT2_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past_key_values` is `None` else\n `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input\n sequence tokens in the vocabulary.\n\n If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):\n Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have\n their past given to this model should not be passed as `input_ids` as they have already been computed.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for\n `past_key_values`. In other words, the `attention_mask` always has to have the length:\n `len(past_key_values) + len(input_ids)`\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see\n `past_key_values`).\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" PARALLELIZE_DOCSTRING = '\n This is an experimental feature and is a subject to change at a moment\'s notice.\n\n Uses a device map to distribute attention modules of the model across several devices. If no device map is given,\n it will evenly distribute blocks across all devices.\n\n Args:\n device_map (`Dict[int, list]`, *optional*):\n A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always\n automatically mapped to the first device (for esoteric reasons). That means that the first device should\n have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the\n following number of attention modules:\n\n - openai-community/gpt2: 12\n - openai-community/gpt2-medium: 24\n - openai-community/gpt2-large: 36\n - openai-community/gpt2-xl: 48\n\n Example:\n\n ```python\n # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:\n model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-xl")\n device_map = {\n 0: [0, 1, 2, 3, 4, 5, 6, 7, 8],\n 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],\n 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],\n 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47],\n }\n model.parallelize(device_map)\n ```\n' DEPARALLELIZE_DOCSTRING = '\n Moves the model to cpu from a model parallel state.\n\n Example:\n\n ```python\n # On a 4 GPU machine with openai-community/gpt2-large:\n model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-large")\n device_map = {\n 0: [0, 1, 2, 3, 4, 5, 6, 7],\n 1: [8, 9, 10, 11, 12, 13, 14, 15],\n 2: [16, 17, 18, 19, 20, 21, 22, 23],\n 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],\n }\n model.parallelize(device_map) # Splits the model across several devices\n model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()\n ```\n' @add_start_docstrings('The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.', GPT2_START_DOCSTRING) class GPT2Model(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.model_parallel = False self.device_map = None self.gradient_checkpointing = False 
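# NOTE (illustrative sketch, not part of the original file): the arguments documented in
# GPT2_INPUTS_DOCSTRING above map directly onto a forward pass of this GPT2Model. A minimal,
# hypothetical usage with a tiny randomly initialised config could look like:
#
#     import torch
#     from transformers import GPT2Config, GPT2Model
#
#     config = GPT2Config(n_layer=2, n_head=2, n_embd=64)   # tiny config, illustration only
#     model = GPT2Model(config)
#     input_ids = torch.randint(0, config.vocab_size, (1, 8))
#     attention_mask = torch.ones_like(input_ids)
#     outputs = model(input_ids=input_ids, attention_mask=attention_mask, use_cache=True)
#     # outputs.last_hidden_state has shape (1, 8, 64); outputs.past_key_values holds one
#     # (key, value) pair per layer and can be fed back in as `past_key_values` on the next step.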
self._attn_implementation = config._attn_implementation self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn("`GPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1, ...}", FutureWarning) self.device_map = get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map assert_device_map(self.device_map, len(self.h)) self.model_parallel = True self.first_device = 'cpu' if 'cpu' in self.device_map.keys() else 'cuda:' + str(min(self.device_map.keys())) self.last_device = 'cuda:' + str(max(self.device_map.keys())) self.wte = self.wte.to(self.first_device) self.wpe = self.wpe.to(self.first_device) for (k, v) in self.device_map.items(): for block in v: cuda_device = 'cuda:' + str(k) self.h[block] = self.h[block].to(cuda_device) self.ln_f = self.ln_f.to(self.last_device) @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning) self.model_parallel = False self.device_map = None self.first_device = 'cpu' self.last_device = 'cpu' self.wte = self.wte.to('cpu') self.wpe = self.wpe.to('cpu') for index in range(len(self.h)): self.h[index] = self.h[index].to('cpu') self.ln_f = self.ln_f.to('cpu') torch.cuda.empty_cache() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = 
inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds _use_sdpa = self._attn_implementation == 'sdpa' and output_attentions is False and (head_mask is None) attention_mask = attention_mask.view(batch_size, -1) if attention_mask is not None else None if self._attn_implementation == 'flash_attention_2': attention_mask = attention_mask if attention_mask is not None and 0 in attention_mask else None elif _use_sdpa: attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(attention_mask=attention_mask, input_shape=(batch_size, input_shape[-1]), inputs_embeds=inputs_embeds, past_key_values_length=past_length) elif attention_mask is not None: attention_mask = attention_mask[:, None, None, :] attention_mask = attention_mask.to(dtype=self.dtype) attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min if self.config.add_cross_attention and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) if _use_sdpa: encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(mask=encoder_attention_mask, dtype=inputs_embeds.dtype, tgt_len=input_shape[-1]) elif not self._attn_implementation == 'flash_attention_2': encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.n_layer) if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for (i, (block, layer_past)) in enumerate(zip(self.h, past_key_values)): if self.model_parallel: torch.cuda.set_device(hidden_states.device) if layer_past is not None: layer_past = tuple((past_state.to(hidden_states.device) for past_state in layer_past)) if attention_mask is not None: attention_mask = attention_mask.to(hidden_states.device) if isinstance(head_mask, torch.Tensor): head_mask = head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions) else: outputs = block(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) if self.model_parallel: for (k, v) in self.device_map.items(): if i == v[-1] and 'cuda:' + str(k) != self.last_device: hidden_states = hidden_states.to('cuda:' + str(k + 1)) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) @add_start_docstrings('\n The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', GPT2_START_DOCSTRING) class GPT2LMHeadModel(GPT2PreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.model_parallel = False self.device_map = None self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn("`GPT2LMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0': 0, 'transformer.h.1': 1, ...}", FutureWarning) self.device_map = get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) if device_map is None else device_map assert_device_map(self.device_map, len(self.transformer.h)) self.transformer.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.transformer.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning) self.transformer.deparallelize() self.transformer = self.transformer.to('cpu') self.lm_head = self.lm_head.to('cpu') self.model_parallel = False torch.cuda.empty_cache() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): token_type_ids = kwargs.get('token_type_ids', None) if past_key_values: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -input_ids.shape[1]:] attention_mask = kwargs.get('attention_mask', None) position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] else: position_ids = None if inputs_embeds is not None and past_key_values is None: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids} model_inputs.update({'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'position_ids': position_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}) return model_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, 
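# (descriptive note) the remaining flags are forwarded unchanged; the base GPT2Model returns the final hidden states first, followed by the optional key/value cache, per-layer hidden states and attentions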
output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] if self.model_parallel: torch.cuda.set_device(self.transformer.first_device) hidden_states = hidden_states.to(self.lm_head.weight.device) lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions) @staticmethod def _reorder_cache(past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past_key_values)) @add_start_docstrings('\nThe GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for\nRocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the\ninput embeddings, the classification head takes as input the input of a specified classification token index in the\ninput sequence).\n', GPT2_START_DOCSTRING) class GPT2DoubleHeadsModel(GPT2PreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) config.num_labels = 1 self.transformer = GPT2Model(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.multiple_choice_head = SequenceSummary(config) self.model_parallel = False self.device_map = None self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn("`GPT2DoubleHeadsModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0': 0, 'transformer.h.1': 1, ...}", FutureWarning) self.device_map = get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) if device_map is None else device_map assert_device_map(self.device_map, len(self.transformer.h)) self.transformer.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.transformer.first_device) self.multiple_choice_head = self.multiple_choice_head.to(self.transformer.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning) self.transformer.deparallelize() self.transformer = self.transformer.to('cpu') self.lm_head = self.lm_head.to('cpu') self.multiple_choice_head = self.multiple_choice_head.to('cpu') self.model_parallel = False torch.cuda.empty_cache() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, input_ids, inputs_embeds=None, past_key_values=None, **kwargs): token_type_ids = kwargs.get('token_type_ids', None) if past_key_values: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -input_ids.shape[1]:] attention_mask = kwargs.get('attention_mask', None) position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] else: position_ids = None if inputs_embeds is not None and past_key_values is None: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids.contiguous()} model_inputs.update({'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'position_ids': position_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}) return model_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, mc_token_ids: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, mc_labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple, GPT2DoubleHeadsModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, 
use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] if self.model_parallel: torch.cuda.set_device(self.transformer.first_device) hidden_states = hidden_states.to(self.lm_head.weight.device) lm_logits = self.lm_head(hidden_states) mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1) mc_loss = None if mc_labels is not None: loss_fct = CrossEntropyLoss() mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)) lm_loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits, mc_logits) + transformer_outputs[1:] if mc_loss is not None: output = (mc_loss,) + output return (lm_loss,) + output if lm_loss is not None else output return GPT2DoubleHeadsModelOutput(loss=lm_loss, mc_loss=mc_loss, logits=lm_logits, mc_logits=mc_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @staticmethod def _reorder_cache(past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past_key_values)) @add_start_docstrings('\n The GPT2 Model transformer with a sequence classification head on top (linear layer).\n\n [`GPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', GPT2_START_DOCSTRING) class GPT2ForSequenceClassification(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPT2Model(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) self.model_parallel = False self.device_map = None self.post_init() @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint='microsoft/DialogRPT-updown', output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: (batch_size, sequence_length) = input_ids.shape[:2] else: (batch_size, sequence_length) = inputs_embeds.shape[:2] assert self.config.pad_token_id is not None or batch_size == 1, 'Cannot handle batch sizes > 1 if no padding token is defined.' if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n GPT2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', GPT2_START_DOCSTRING) class GPT2ForTokenClassification(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPT2Model(config) if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None: classifier_dropout = config.classifier_dropout elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.model_parallel = False self.device_map = None self.post_init() @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint='brad1141/gpt2-finetuned-comp2', output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_loss=0.25, expected_output=['Lead', 'Lead', 'Lead', 'Position', 'Lead', 'Lead', 'Lead', 'Lead', 'Lead', 'Lead', 'Lead', 'Lead']) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, 
return_dict=return_dict) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n The GPT-2 Model transformer with a span classification head on top for extractive question-answering tasks like\n SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', GPT2_START_DOCSTRING) class GPT2ForQuestionAnswering(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPT2Model(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.model_parallel = False self.device_map = None self.post_init() @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, real_checkpoint=_CHECKPOINT_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: 
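# --- Illustrative usage sketch (not part of the original module). Assuming the standard
# `generate` API that `GPT2LMHeadModel` inherits via `PreTrainedModel` and the generation mixin,
# greedy decoding with the LM-head model defined above could look roughly like this:
import torch
from transformers import AutoTokenizer, GPT2LMHeadModel

tokenizer = AutoTokenizer.from_pretrained('openai-community/gpt2')
model = GPT2LMHeadModel.from_pretrained('openai-community/gpt2')
model.eval()

inputs = tokenizer('The quick brown fox', return_tensors='pt')
with torch.no_grad():
    # `generate` relies on `prepare_inputs_for_generation` and the per-layer key/value cache above
    output_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))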
transformers-main/src/transformers/models/gpt2/modeling_tf_gpt2.py """""" from __future__ import annotations from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions, TFCausalLMOutputWithCrossAttentions, TFSequenceClassifierOutputWithPast from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFConv1D, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, TFSequenceSummary, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_gpt2 import GPT2Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'openai-community/gpt2' _CONFIG_FOR_DOC = 'GPT2Config' class TFAttention(keras.layers.Layer): def __init__(self, nx, config, scale=False, is_cross_attention=False, **kwargs): super().__init__(**kwargs) n_state = nx assert n_state % config.n_head == 0 self.n_head = config.n_head self.split_size = n_state self.scale = scale self.output_attentions = config.output_attentions self.is_cross_attention = is_cross_attention if self.is_cross_attention: self.c_attn = TFConv1D(n_state * 2, nx, initializer_range=config.initializer_range, name='c_attn') self.q_attn = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='q_attn') else: self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name='c_attn') self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_proj') self.attn_dropout = keras.layers.Dropout(config.attn_pdrop) self.resid_dropout = keras.layers.Dropout(config.resid_pdrop) self.pruned_heads = set() self.embed_dim = n_state def prune_heads(self, heads): pass @staticmethod def causal_attention_mask(nd, ns, dtype): i = tf.range(nd)[:, None] j = tf.range(ns) m = i >= j - ns + nd return tf.cast(m, dtype) def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False): w = tf.matmul(q, k, transpose_b=True) if self.scale: dk = tf.cast(shape_list(k)[-1], dtype=w.dtype) w = w / tf.math.sqrt(dk) if not self.is_cross_attention: (_, _, nd, ns) = shape_list(w) b = self.causal_attention_mask(nd, ns, dtype=w.dtype) b = tf.reshape(b, [1, 1, nd, ns]) w = w * b - 10000.0 * (1 - b) if attention_mask is not None: attention_mask = tf.cast(attention_mask, dtype=w.dtype) w = w + attention_mask w = stable_softmax(w, axis=-1) w = self.attn_dropout(w, training=training) if head_mask is not None: w = w * head_mask outputs = [tf.matmul(w, v)] if output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = tf.transpose(x, [0, 2, 1, 3]) x_shape = shape_list(x) new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]] return tf.reshape(x, new_x_shape) def split_heads(self, x): x_shape = shape_list(x) new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head] x = tf.reshape(x, new_x_shape) return tf.transpose(x, (0, 2, 1, 3)) def call(self, x, layer_past, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=False): if encoder_hidden_states is not None: if not hasattr(self, 'q_attn'): raise ValueError('If class is used as cross attention, the weights 
`q_attn` have to be defined. Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`.') query = self.q_attn(x) kv_out = self.c_attn(encoder_hidden_states) (key, value) = tf.split(kv_out, 2, axis=2) attention_mask = encoder_attention_mask else: x = self.c_attn(x) (query, key, value) = tf.split(x, 3, axis=2) query = self.split_heads(query) key = self.split_heads(key) value = self.split_heads(value) if layer_past is not None: (past_key, past_value) = tf.unstack(layer_past, axis=0, num=2) key = tf.concat([past_key, key], axis=-2) value = tf.concat([past_value, value], axis=-2) if use_cache: present = tf.stack([key, value], axis=0) else: present = (None,) attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training) a = attn_outputs[0] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a, training=training) outputs = [a, present] + attn_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if self.is_cross_attention: c_attn_shape = 2 * self.embed_dim else: c_attn_shape = 3 * self.embed_dim if getattr(self, 'c_proj', None) is not None: with tf.name_scope(self.c_proj.name): self.c_proj.build([None, None, self.embed_dim]) if getattr(self, 'c_attn', None) is not None: with tf.name_scope(self.c_attn.name): self.c_attn.build([None, None, c_attn_shape]) if getattr(self, 'q_attn', None) is not None: with tf.name_scope(self.q_attn.name): self.q_attn.build([None, None, self.embed_dim]) class TFMLP(keras.layers.Layer): def __init__(self, n_state, config, **kwargs): super().__init__(**kwargs) nx = config.n_embd self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_fc') self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name='c_proj') self.act = get_tf_activation(config.activation_function) self.dropout = keras.layers.Dropout(config.resid_pdrop) self.intermediate_size = n_state self.embed_dim = nx def call(self, x, training=False): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) h2 = self.dropout(h2, training=training) return h2 def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'c_fc', None) is not None: with tf.name_scope(self.c_fc.name): self.c_fc.build([None, None, self.intermediate_size]) if getattr(self, 'c_proj', None) is not None: with tf.name_scope(self.c_proj.name): self.c_proj.build([None, None, self.embed_dim]) class TFBlock(keras.layers.Layer): def __init__(self, config, scale=False, **kwargs): super().__init__(**kwargs) nx = config.n_embd inner_dim = config.n_inner if config.n_inner is not None else 4 * nx self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_1') self.attn = TFAttention(nx, config, scale, name='attn') self.ln_2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_2') if config.add_cross_attention: self.crossattention = TFAttention(nx, config, scale, name='crossattention', is_cross_attention=True) self.ln_cross_attn = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_cross_attn') self.mlp = TFMLP(inner_dim, config, name='mlp') self.hidden_size = config.hidden_size def call(self, x, layer_past, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=False): a = self.ln_1(x) output_attn = self.attn(a, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, 
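# (descriptive note) the self-attention sub-layer never receives encoder tensors here; when `config.add_cross_attention` is set, cross-attention over `encoder_hidden_states` is applied separately just below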
encoder_hidden_states=None, encoder_attention_mask=None, use_cache=use_cache, output_attentions=output_attentions, training=training) a = output_attn[0] outputs = output_attn[1:] x = x + a if encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') ca = self.ln_cross_attn(x) output_cross_attn = self.crossattention(ca, layer_past=None, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=False, output_attentions=output_attentions, training=training) ca = output_cross_attn[0] x = x + ca outputs = outputs + output_cross_attn[2:] m = self.ln_2(x) m = self.mlp(m, training=training) x = x + m outputs = [x] + outputs return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'ln_1', None) is not None: with tf.name_scope(self.ln_1.name): self.ln_1.build([None, None, self.hidden_size]) if getattr(self, 'attn', None) is not None: with tf.name_scope(self.attn.name): self.attn.build(None) if getattr(self, 'ln_2', None) is not None: with tf.name_scope(self.ln_2.name): self.ln_2.build([None, None, self.hidden_size]) if getattr(self, 'mlp', None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, 'crossattention', None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None) if getattr(self, 'ln_cross_attn', None) is not None: with tf.name_scope(self.ln_cross_attn.name): self.ln_cross_attn.build([None, None, self.hidden_size]) @keras_serializable class TFGPT2MainLayer(keras.layers.Layer): config_class = GPT2Config def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) self.config = config self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.use_cache = config.use_cache self.return_dict = config.use_return_dict self.num_hidden_layers = config.n_layer self.n_embd = config.n_embd self.n_positions = config.n_positions self.initializer_range = config.initializer_range self.wte = keras.layers.Embedding(input_dim=config.vocab_size, output_dim=config.hidden_size, embeddings_initializer=get_initializer(config.initializer_range), name='wte') self.wpe = keras.layers.Embedding(input_dim=config.n_positions, output_dim=config.n_embd, embeddings_initializer=get_initializer(config.initializer_range), name='wpe') self.drop = keras.layers.Dropout(config.embd_pdrop) self.h = [TFBlock(config, scale=True, name=f'h_._{i}') for i in range(config.n_layer)] self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_f') self.embed_dim = config.hidden_size def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | 
tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if past_key_values is None: past_length = 0 past_key_values = [None] * len(self.h) else: past_length = shape_list(past_key_values[0][0])[-2] if position_ids is None: position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0) if attention_mask is not None: attention_mask_shape = shape_list(attention_mask) attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])) one_cst = tf.constant(1.0) attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype) attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0)) if self.config.add_cross_attention and encoder_attention_mask is not None: encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=encoder_hidden_states.dtype) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None encoder_attention_mask = encoder_extended_attention_mask if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.wte(token_type_ids) else: token_type_embeds = tf.constant(0.0) position_embeds = tf.cast(position_embeds, dtype=inputs_embeds.dtype) token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype) hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () if use_cache else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for (i, (block, layer_past)) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block(hidden_states, layer_past, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=training) (hidden_states, present) = outputs[:2] if use_cache: presents = presents + (present,) if output_attentions: 
all_attentions = all_attentions + (outputs[2],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (outputs[3],) hidden_states = self.ln_f(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if output_attentions: attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple((tf.reshape(t, attention_output_shape) for t in all_attentions)) if not return_dict: return tuple((v for v in [hidden_states, presents, all_hidden_states, all_attentions, all_cross_attentions] if v is not None)) return TFBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'wte', None) is not None: with tf.name_scope(self.wte.name): self.wte.build(None) if getattr(self, 'wpe', None) is not None: with tf.name_scope(self.wpe.name): self.wpe.build(None) if getattr(self, 'ln_f', None) is not None: with tf.name_scope(self.ln_f.name): self.ln_f.build([None, None, self.embed_dim]) if getattr(self, 'h', None) is not None: for layer in self.h: with tf.name_scope(layer.name): layer.build(None) class TFGPT2PreTrainedModel(TFPreTrainedModel): config_class = GPT2Config base_model_prefix = 'transformer' _keys_to_ignore_on_load_unexpected = ['h.\\d+.attn.bias', 'h.\\d+.crossattention.bias'] @property def input_signature(self): return {'input_ids': tf.TensorSpec((None, None), tf.int32, name='input_ids'), 'attention_mask': tf.TensorSpec((None, None), tf.int32, name='attention_mask')} @dataclass class TFGPT2DoubleHeadsModelOutput(ModelOutput): logits: tf.Tensor = None mc_logits: tf.Tensor = None past_key_values: List[tf.Tensor] | None = None hidden_states: Tuple[tf.Tensor] | None = None attentions: Tuple[tf.Tensor] | None = None GPT2_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! 
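# --- Illustrative sketch (not part of the original file). How TFGPT2MainLayer.call
# above converts a 2-D padding mask into the additive bias that TFAttention._attn adds
# to the raw attention scores before the softmax. Mask values are made up.
import tensorflow as tf

attention_mask = tf.constant([[1, 1, 1, 0, 0]])                  # [batch, seq]: 1 = real token, 0 = padding
mask = tf.cast(tf.reshape(attention_mask, (1, 1, 1, 5)), tf.float32)
additive_mask = (1.0 - mask) * -10000.0                          # 0.0 for real tokens, -10000.0 for padding
print(additive_mask.numpy().squeeze())                           # [-0. -0. -0. -10000. -10000.]
# Broadcasting the [batch, 1, 1, seq] bias over heads and query positions makes the
# softmax assign (near-)zero probability to padded key positions.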
If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Parameters:\n config ([`GPT2Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GPT2_INPUTS_DOCSTRING = "\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]`\n (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.\n\n If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past_key_values (`List[tf.Tensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past_key_values` output below). Can be used to speed up sequential decoding. The token ids which have\n their past given to this model should not be passed as input ids as they have already been computed.\n attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for\n `past_key_values`. In other words, the `attention_mask` always has to have the length:\n `len(past_key_values) + len(input_ids)`\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
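# --- Illustrative usage sketch (not part of the original file). The docstring above
# lists the input formats accepted by the Keras-based models; a minimal example,
# assuming the `openai-community/gpt2` checkpoint can be loaded:
from transformers import GPT2Tokenizer, TFGPT2Model

tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
model = TFGPT2Model.from_pretrained("openai-community/gpt2")
enc = tokenizer("Hello, my dog is cute", return_tensors="tf")

# 1) keyword arguments
out_kw = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])
# 2) a list in the first positional argument, in the order given in the docstring
out_list = model([enc["input_ids"], enc["attention_mask"]])
# 3) a dictionary keyed by input names
out_dict = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})
print(out_kw.last_hidden_state.shape)              # (1, sequence_length, 768)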
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @add_start_docstrings('The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.', GPT2_START_DOCSTRING) class TFGPT2Model(TFGPT2PreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name='transformer') @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def 
build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) @add_start_docstrings('\n The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', GPT2_START_DOCSTRING) class TFGPT2LMHeadModel(TFGPT2PreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name='transformer') def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): token_type_ids = kwargs.get('token_type_ids', None) if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) if token_type_ids is not None: token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1) position_ids = kwargs.get('position_ids', None) attention_mask = kwargs.get('attention_mask', None) if attention_mask is not None and position_ids is None: position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True) if past_key_values: position_ids = tf.expand_dims(position_ids[:, -1], -1) return {'input_ids': inputs, 'attention_mask': attention_mask, 'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'token_type_ids': token_type_ids} @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: transformer_outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) hidden_states = transformer_outputs[0] logits = tf.matmul(hidden_states, self.transformer.wte.weights, transpose_b=True) loss = None if labels is not None: shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels, shifted_logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return TFCausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, 
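# --- Illustrative sketch (not part of the original file). The logit/label alignment
# used by the loss computation in TFGPT2LMHeadModel.call above: position t of the
# logits predicts token t+1, so logits lose the last step and labels the first.
# Token ids and the plain Keras loss below are only stand-ins for `hf_compute_loss`.
import tensorflow as tf

labels = tf.constant([[15496, 11, 995, 50256]])        # hypothetical token ids, shape [batch, seq]
logits = tf.random.normal([1, 4, 50257])               # [batch, seq, vocab]

shifted_logits = logits[:, :-1]                        # predictions for positions 1..3
shifted_labels = labels[:, 1:]                         # targets for positions 1..3
loss = tf.keras.losses.sparse_categorical_crossentropy(shifted_labels, shifted_logits, from_logits=True)
print(loss.shape)                                      # (1, 3): one term per predicted position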
hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) @add_start_docstrings('\n The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for\n RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the\n input embeddings, the classification head takes as input the input of a specified classification token index in the\n input sequence).\n ', GPT2_START_DOCSTRING) class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) config.num_labels = 1 self.transformer = TFGPT2MainLayer(config, name='transformer') self.multiple_choice_head = TFSequenceSummary(config, initializer_range=config.initializer_range, name='multiple_choice_head') @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFGPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, mc_token_ids: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFGPT2DoubleHeadsModelOutput, Tuple[tf.Tensor]]: if input_ids is not None: input_shapes = shape_list(input_ids) else: input_shapes = shape_list(inputs_embeds)[:-1] seq_length = input_shapes[-1] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None transformer_outputs = self.transformer(input_ids=flat_input_ids, past_key_values=past_key_values, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=None, encoder_attention_mask=None, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) hidden_states = transformer_outputs[0] hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:]) if return_dict and output_hidden_states: all_hidden_states = transformer_outputs.hidden_states[:-1] + (hidden_states,) else: all_hidden_states = None lm_logits = tf.matmul(hidden_states, self.transformer.wte.weights, transpose_b=True) mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids, training=training) mc_logits = tf.squeeze(mc_logits, axis=-1) if not return_dict: return (lm_logits, mc_logits) + transformer_outputs[1:] return 
TFGPT2DoubleHeadsModelOutput(logits=lm_logits, mc_logits=mc_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=all_hidden_states, attentions=transformer_outputs.attentions) @property def input_signature(self): return {'input_ids': tf.TensorSpec((None, None, None), tf.int32, name='input_ids'), 'attention_mask': tf.TensorSpec((None, None, None), tf.int32, name='attention_mask'), 'mc_token_ids': tf.TensorSpec((None, None), tf.int32, name='mc_token_ids')} def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, 'multiple_choice_head', None) is not None: with tf.name_scope(self.multiple_choice_head.name): self.multiple_choice_head.build(None) @add_start_docstrings('\n The GPT2 Model transformer with a sequence classification head on top (linear layer).\n\n [`TFGPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', GPT2_START_DOCSTRING) class TFGPT2ForSequenceClassification(TFGPT2PreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.score = keras.layers.Dense(config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='score', use_bias=False) self.transformer = TFGPT2MainLayer(config, name='transformer') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint='microsoft/DialogRPT-updown', output_type=TFSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]: transformer_outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) logits_shape = shape_list(logits) in_logits = None if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = 
tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1) - 1 sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1) in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') loss = None if labels is not None: assert self.config.pad_token_id is not None or logits_shape[0] == 1, 'Cannot handle batch sizes > 1 if no padding token is defined.' if not tf.is_tensor(sequence_lengths): in_logits = logits[0:logits_shape[0], sequence_lengths] loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels])) pooled_logits = in_logits if in_logits is not None else logits if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return TFSequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'score', None) is not None: with tf.name_scope(self.score.name): self.score.build([None, None, self.config.n_embd]) if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) # File: transformers-main/src/transformers/models/gpt2/tokenization_gpt2.py """""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} @lru_cache() def bytes_to_unicode(): bs = list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1)) cs = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class GPT2Tokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, errors='replace', unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', pad_token=None, add_prefix_space=False, add_bos_token=False, **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token self.add_bos_token = add_bos_token with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for (k, v) in self.encoder.items()} self.errors = errors self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()} with open(merges_file, 
encoding='utf-8') as merges_handle: bpe_merges = merges_handle.read().split('\n')[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+") super().__init__(errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, add_bos_token=add_bos_token, **kwargs) @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): if self.add_bos_token: bos_token_ids = [self.bos_token_id] else: bos_token_ids = [] output = bos_token_ids + token_ids_0 if token_ids_1 is None: return output return output + bos_token_ids + token_ids_1 def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if not self.add_bos_token: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) def _tokenize(self, text): bpe_tokens = [] for token in re.findall(self.pat, text): token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8'))) bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' '))) return bpe_tokens def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): return self.decoder.get(index) def convert_tokens_to_string(self, tokens): text = ''.join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with 
open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space) if is_split_into_words or add_prefix_space: text = ' ' + text return (text, kwargs) # File: transformers-main/src/transformers/models/gpt2/tokenization_gpt2_fast.py """""" import json from typing import Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpt2 import GPT2Tokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} class GPT2TokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = GPT2Tokenizer def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, **kwargs): super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs) self.add_bos_token = kwargs.pop('add_bos_token', False) pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space: pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type')) pre_tok_state['add_prefix_space'] = add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) self.add_prefix_space = add_prefix_space def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.' return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.' 
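# --- Illustrative usage sketch (not part of the original file). Byte-level BPE
# behaviour of the GPT-2 tokenizers defined above, assuming the
# `openai-community/gpt2` vocabulary files can be loaded.
from transformers import GPT2Tokenizer, GPT2TokenizerFast

slow = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
fast = GPT2TokenizerFast.from_pretrained("openai-community/gpt2")

# Leading spaces are folded into the following token ("Ġ" is the byte-level
# encoding of the space character produced by bytes_to_unicode above).
print(slow.tokenize("Hello world"))                   # expected: ['Hello', 'Ġworld']
print(slow("Hello world")["input_ids"] == fast("Hello world")["input_ids"])   # expected: True

# add_prefix_space=True treats the first word like a word in the middle of a sentence.
prefixed = GPT2Tokenizer.from_pretrained("openai-community/gpt2", add_prefix_space=True)
print(prefixed.tokenize("Hello world"))               # expected: ['ĠHello', 'Ġworld']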
return super()._encode_plus(*args, **kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) # File: transformers-main/src/transformers/models/gpt2/tokenization_gpt2_tf.py import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from ...modeling_tf_utils import keras from .tokenization_gpt2 import GPT2Tokenizer class TFGPT2Tokenizer(keras.layers.Layer): def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int=None, pad_token_id: int=None): super().__init__() self.pad_token_id = pad_token_id self.max_length = max_length self.vocab = vocab self.merges = merges self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length) @classmethod def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs): merges = [' '.join(m) for m in tokenizer.bpe_ranks.keys()] vocab = tokenizer.get_vocab() return cls(vocab, merges, *args, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs): tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs) return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs) @classmethod def from_config(cls, config): return cls(**config) def get_config(self): return {'vocab': self.vocab, 'merges': self.merges, 'max_length': self.max_length, 'pad_token_id': self.pad_token_id} def call(self, x, max_length: int=None): input_ids = self.tf_tokenizer(x) attention_mask = tf.ones_like(input_ids) if self.pad_token_id is not None: max_length = max_length if max_length is not None else self.max_length if max_length is not None: (input_ids, attention_mask) = pad_model_inputs(input_ids, max_seq_length=max_length, pad_value=self.pad_token_id) return {'attention_mask': attention_mask, 'input_ids': input_ids} # File: transformers-main/src/transformers/models/gpt_bigcode/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_gpt_bigcode': ['GPTBigCodeConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_gpt_bigcode'] = ['GPTBigCodeForSequenceClassification', 'GPTBigCodeForTokenClassification', 'GPTBigCodeForCausalLM', 'GPTBigCodeModel', 'GPTBigCodePreTrainedModel'] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class GPTBigCodeConfig(PretrainedConfig): model_type = 'gpt_bigcode' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 
'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'} def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function='gelu_pytorch_tanh', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs): self.vocab_size = vocab_size self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.n_inner = n_inner self.activation_function = activation_function self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.scale_attn_weights = scale_attn_weights self.use_cache = use_cache self.attention_softmax_in_fp32 = attention_softmax_in_fp32 self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32 self.multi_query = multi_query self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) # File: transformers-main/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py """""" import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import is_torch_greater_or_equal_than_2_2 from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging from .configuration_gpt_bigcode import GPTBigCodeConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'bigcode/gpt_bigcode-santacoder' _CONFIG_FOR_DOC = 'GPTBigCodeConfig' @torch.jit.script def upcast_masked_softmax(x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor, scale: float, softmax_dtype: torch.dtype): input_dtype = x.dtype x = x.to(softmax_dtype) * scale x = torch.where(mask, x, mask_value) x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype) return x @torch.jit.script def upcast_softmax(x: torch.Tensor, scale: float, softmax_dtype: torch.dtype): input_dtype = x.dtype x = x.to(softmax_dtype) * scale x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype) return x @torch.jit.script def masked_softmax(x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor): x = torch.where(mask, x, mask_value) x = torch.nn.functional.softmax(x, dim=-1) return x class GPTBigCodeAttention(nn.Module): def __init__(self, config, is_cross_attention=False, layer_idx=None): super().__init__() self.config = config self.mask_value = None self.multi_query = config.multi_query self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.kv_heads = 1 if self.multi_query else self.num_heads self.kv_dim = self.kv_heads * self.head_dim self.split_size = 
self.embed_dim self.is_causal = True if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale_attn_weights = config.scale_attn_weights self.is_cross_attention = is_cross_attention self.layer_idx = layer_idx self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 self.scale_attention_softmax_in_fp32 = config.scale_attention_softmax_in_fp32 and config.attention_softmax_in_fp32 self.attn_pdrop = config.attn_pdrop if self.is_cross_attention: if self.multi_query: raise NotImplementedError('Multi-Query Attention not supported for cross_attention') self.c_attn = nn.Linear(self.embed_dim, 2 * self.embed_dim) self.q_attn = nn.Linear(self.embed_dim, self.embed_dim) else: self.c_attn = nn.Linear(self.embed_dim, self.embed_dim + 2 * self.kv_dim) self.c_proj = nn.Linear(self.embed_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) def _get_mask_value(self, device, dtype): if self.mask_value is None or self.mask_value.dtype != dtype or self.mask_value.device != device: self.mask_value = torch.full([], torch.finfo(dtype).min, dtype=dtype, device=device) return self.mask_value def _attn(self, query, key, value, attention_mask=None, head_mask=None): dtype = query.dtype softmax_dtype = torch.float32 if self.attention_softmax_in_fp32 else dtype upcast = dtype != softmax_dtype unscale = self.layer_idx + 1 if self.scale_attention_softmax_in_fp32 and upcast else 1 scale_factor = unscale ** (-1) if self.scale_attn_weights: scale_factor /= self.head_dim ** 0.5 query_shape = query.shape batch_size = query_shape[0] key_length = key.size(-1) if self.multi_query: query_length = query_shape[1] attn_shape = (batch_size, query_length, self.num_heads, key_length) attn_view = (batch_size, query_length * self.num_heads, key_length) query = query.reshape(batch_size, query_length * self.num_heads, self.head_dim) else: query_length = query_shape[2] attn_shape = (batch_size, self.num_heads, query_length, key_length) attn_view = (batch_size * self.num_heads, query_length, key_length) query = query.reshape(batch_size * self.num_heads, query_length, self.head_dim) key = key.reshape(batch_size * self.num_heads, self.head_dim, key_length) attn_weights = torch.empty(attn_view, device=query.device, dtype=query.dtype) if query.device.type == 'cpu': attn_weights = torch.zeros_like(attn_weights) beta = 1 else: beta = 0 attn_weights = torch.baddbmm(attn_weights, query, key, beta=beta, alpha=scale_factor).view(attn_shape) if upcast: if attention_mask is None: attn_weights = upcast_softmax(attn_weights, unscale, softmax_dtype) else: mask_value = self._get_mask_value(attn_weights.device, softmax_dtype) attn_weights = upcast_masked_softmax(attn_weights, attention_mask, mask_value, unscale, softmax_dtype) else: if attention_mask is not None: mask_value = self._get_mask_value(attn_weights.device, softmax_dtype) attn_weights = torch.where(attention_mask, attn_weights, mask_value) attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: if self.multi_query: head_mask = head_mask.transpose(1, 2) attn_weights = attn_weights * head_mask if self.multi_query: attn_output = torch.bmm(attn_weights.view(attn_view), value).view(query_shape) else: attn_output = torch.matmul(attn_weights, value) return (attn_output, attn_weights) def forward(self, 
hidden_states: torch.Tensor, layer_past: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]]]: if encoder_hidden_states is not None: if not hasattr(self, 'q_attn') or not self.is_cross_attention: raise ValueError('If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`.') query = self.q_attn(hidden_states) key_value = self.c_attn(encoder_hidden_states) attention_mask = encoder_attention_mask elif self.multi_query: (query, key_value) = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2) else: (query, key_value) = self.c_attn(hidden_states).view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim).transpose(1, 2).split((self.head_dim, 2 * self.head_dim), dim=3) if layer_past is not None: key_value = torch.cat((layer_past, key_value), dim=-2) present = key_value if use_cache else None (key, value) = key_value.split((self.head_dim, self.head_dim), dim=-1) (attn_output, attn_weights) = self._attn(query, key.transpose(-1, -2), value, attention_mask, head_mask) if not self.multi_query: attn_output = attn_output.transpose(1, 2).reshape(hidden_states.shape) attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: if self.multi_query: attn_weights = attn_weights.transpose(1, 2) outputs += (attn_weights,) return outputs class GPTBigCodeFlashAttention2(GPTBigCodeAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: torch.Tensor, layer_past: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]]]: if encoder_hidden_states is not None: if not hasattr(self, 'q_attn') or not self.is_cross_attention: raise ValueError('If class is used as cross attention, the weights `q_attn` have to be defined. 
Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`.') query = self.q_attn(hidden_states) key_value = self.c_attn(encoder_hidden_states) attention_mask = encoder_attention_mask elif self.multi_query: (query, key_value) = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2) else: (query, key_value) = self.c_attn(hidden_states).view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim).transpose(1, 2).split((self.head_dim, 2 * self.head_dim), dim=3) if layer_past is not None: key_value = torch.cat((layer_past, key_value), dim=-2) present = key_value if use_cache else None (key, value) = key_value.split((self.head_dim, self.head_dim), dim=-1) if self.multi_query: (batch_size, query_length, _) = query.shape query = query.reshape(batch_size, query_length, self.num_heads, self.head_dim) key = key.unsqueeze(2) value = value.unsqueeze(2) else: query_length = query.shape[2] (batch_size, _, tgt, _) = key.shape query = query.transpose(1, 2).reshape(batch_size, query_length, self.num_heads, self.head_dim) key = key.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim) value = value.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim) attn_dropout = self.attn_pdrop if self.training else 0.0 input_dtype = query.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.c_attn.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query = query.to(target_dtype) key = key.to(target_dtype) value = value.to(target_dtype) attn_output = _flash_attention_forward(query, key, value, attention_mask, query_length, dropout=attn_dropout, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_weights_reshaped = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) attn_output = self.c_proj(attn_weights_reshaped) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: if self.multi_query: attn_weights_reshaped = attn_weights_reshaped.transpose(1, 2) else: attn_weights_reshaped = None outputs += (attn_weights_reshaped,) return outputs class GPTBigCodeSdpaAttention(GPTBigCodeAttention): def _attn(self, query, key, value, attention_mask=None, head_mask=None): if head_mask is not None: raise ValueError('PyTorch SDPA does not support head_mask. 
Please open an issue in Transformers repository.') scale = None if not self.scale_attn_weights: scale = 1 query_shape = query.shape batch_size = query_shape[0] key.shape[-2] if self.multi_query: query_length = query_shape[1] query = query.view(batch_size, query_length, self.num_heads, self.head_dim).transpose(1, 2) key = key.unsqueeze(1) value = value.unsqueeze(1) if is_torch_greater_or_equal_than_2_2: key = key.expand(-1, self.num_heads, -1, -1) value = value.expand(-1, self.num_heads, -1, -1) else: query_length = query_shape[-1] if query.device.type == 'cuda' and attention_mask is not None: query = query.contiguous() key = key.contiguous() value = value.contiguous() is_causal = True if self.is_causal and attention_mask is None and (query_length > 1) else False sdpa_result = torch.nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=self.attn_pdrop if self.training else 0.0, is_causal=is_causal, scale=scale) if self.multi_query: sdpa_result = sdpa_result.transpose(1, 2) sdpa_result = sdpa_result.reshape(query_shape) return (sdpa_result, None) def forward(self, hidden_states: torch.Tensor, layer_past: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]]]: if encoder_hidden_states is not None: if not hasattr(self, 'q_attn') or not self.is_cross_attention: raise ValueError('If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`.') query = self.q_attn(hidden_states) key_value = self.c_attn(encoder_hidden_states) attention_mask = encoder_attention_mask elif self.multi_query: (query, key_value) = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2) else: (query, key_value) = self.c_attn(hidden_states).view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim).transpose(1, 2).split((self.head_dim, 2 * self.head_dim), dim=3) if layer_past is not None: key_value = torch.cat((layer_past, key_value), dim=-2) present = key_value if use_cache else None (key, value) = key_value.split((self.head_dim, self.head_dim), dim=-1) if not output_attentions and head_mask is None: (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask) else: logger.warning_once('GPTBigCodeModel is using GPTBigCodeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` and `head_mask` not None. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') (attn_output, attn_weights) = super()._attn(query, key.transpose(-1, -2), value, attention_mask, head_mask) if not self.multi_query: attn_output = attn_output.transpose(1, 2).reshape(hidden_states.shape) attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: if self.multi_query: attn_weights = attn_weights.transpose(1, 2) outputs += (attn_weights,) return outputs class GPTBigCodeMLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = nn.Linear(embed_dim, intermediate_size) self.c_proj = nn.Linear(intermediate_size, embed_dim) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states GPTBIGCODE_ATTENTION_CLASSES = {'eager': GPTBigCodeAttention, 'flash_attention_2': GPTBigCodeFlashAttention2, 'sdpa': GPTBigCodeSdpaAttention} class GPTBigCodeBlock(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() hidden_size = config.hidden_size self.inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPTBIGCODE_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: if config.multi_query: raise NotImplementedError('Cross-attention not implemented for MQA') self.crossattention = GPTBIGCODE_ATTENTION_CLASSES[config._attn_implementation](config, is_cross_attention=True, layer_idx=layer_idx) self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPTBigCodeMLP(self.inner_dim, config) def forward(self, hidden_states: Optional[Tuple[torch.Tensor]], layer_past: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, **kwargs) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions) attn_output = attn_outputs[0] outputs = attn_outputs[1:] hidden_states = attn_output + residual if encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_outputs = self.crossattention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions) attn_output = 
cross_attn_outputs[0] hidden_states = residual + attn_output outputs = outputs + cross_attn_outputs[2:] residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = residual + feed_forward_hidden_states if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs class GPTBigCodePreTrainedModel(PreTrainedModel): config_class = GPTBigCodeConfig base_model_prefix = 'transformer' supports_gradient_checkpointing = True _no_split_modules = ['GPTBigCodeBlock'] _skip_keys_device_placement = 'past_key_values' _supports_flash_attn_2 = True _supports_sdpa = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): if isinstance(module, (GPTBigCodeMLP, GPTBigCodeAttention)): module.c_proj.weight.data.normal_(mean=0.0, std=self.config.initializer_range / math.sqrt(2 * self.config.n_layer)) module.c_proj._is_hf_initialized = True elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) GPT_BIGCODE_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`GPTBigCodeConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GPT_BIGCODE_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past_key_values` is `None` else\n `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input\n sequence tokens in the vocabulary.\n\n If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past_key_values (`Tuple[torch.Tensor]` of length `config.n_layers`):\n Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have\n their past given to this model should not be passed as `input_ids` as they have already been computed.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for\n `past_key_values`. In other words, the `attention_mask` always has to have the length:\n `len(past_key_values) + len(input_ids)`\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see\n `past_key_values`).\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare GPT_BIGCODE Model transformer outputting raw hidden-states without any specific head on top.', GPT_BIGCODE_START_DOCSTRING) class GPTBigCodeModel(GPTBigCodePreTrainedModel): def __init__(self, config): super().__init__(config) self.multi_query = config.multi_query self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPTBigCodeBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) max_positions = config.max_position_embeddings self.register_buffer('bias', torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)), persistent=False) self.gradient_checkpointing = False self._use_sdpa = config._attn_implementation == 'sdpa' self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' self.post_init() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if batch_size <= 0: raise ValueError('batch_size has to be defined and > 0') device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0].size(-2) if attention_mask is not None and 
len(attention_mask.shape) == 2 and (position_ids is None): position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_length > 0: position_ids = position_ids[:, past_length:input_shape[-1] + past_length] elif position_ids is None: position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) query_length = input_shape[-1] key_length = past_length + query_length self_attention_mask = self.bias[None, key_length - query_length:key_length, :key_length] if self._use_flash_attention_2: attention_mask = attention_mask.bool() if attention_mask is not None and 0 in attention_mask else None encoder_attention_mask = encoder_attention_mask.bool() if encoder_attention_mask is not None and 0 in encoder_attention_mask else None else: if attention_mask is not None: self_attention_mask = self_attention_mask * attention_mask.view(batch_size, 1, -1).to(dtype=torch.bool, device=self_attention_mask.device) self_attention_mask = self_attention_mask.unsqueeze(2 if self.multi_query else 1) if self._use_sdpa and head_mask is None and (not output_attentions): dtype = self.wte.weight.dtype min_dtype = torch.finfo(dtype).min self_attention_mask = torch.where(self_attention_mask, torch.full([], 0.0, dtype=dtype, device=self_attention_mask.device), torch.full([], min_dtype, dtype=dtype, device=self_attention_mask.device)) if self.multi_query: self_attention_mask = self_attention_mask.transpose(1, 2) if query_length > 1 and attention_mask is not None and (attention_mask.device.type == 'cuda'): self_attention_mask = AttentionMaskConverter._unmask_unattended(self_attention_mask, min_dtype=min_dtype) attention_mask = self_attention_mask if self.config.add_cross_attention and encoder_hidden_states is not None and (encoder_attention_mask is not None): if encoder_attention_mask.dim() == 2: encoder_attention_mask.unsqueeze(1) assert encoder_attention_mask.dim() == 3 encoder_attention_mask = encoder_attention_mask.bool().unsqueeze(2 if self.multi_query else 1) else: encoder_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) presents = [] if use_cache else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for (i, (block, layer_past)) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions) else: outputs = block(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions) hidden_states = outputs[0] if use_cache: presents.append(outputs[1]) if output_attentions: 
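# --- Illustrative sketch: position ids derived from the attention mask (hedged annotation) ---
# Earlier in this forward pass, `position_ids` is built from a 2D attention mask
# when the caller does not provide it: a cumulative sum over the mask, shifted by
# one, with padded positions clamped to 1. A tiny standalone reproduction of that
# arithmetic; the helper name and toy values are illustrative assumptions.
def _example_position_ids_from_attention_mask():
    import torch

    attention_mask = torch.tensor([[0, 0, 1, 1, 1],
                                   [1, 1, 1, 1, 1]])  # first row is left-padded
    position_ids = attention_mask.long().cumsum(-1) - 1
    position_ids.masked_fill_(attention_mask == 0, 1)
    # -> tensor([[1, 1, 0, 1, 2],
    #            [0, 1, 2, 3, 4]])
    return position_ids
# --- End sketch ---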
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) @add_start_docstrings('\n The GPT_BIGCODE Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', GPT_BIGCODE_START_DOCSTRING) class GPTBigCodeForCausalLM(GPTBigCodePreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.transformer = GPTBigCodeModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): token_type_ids = kwargs.get('token_type_ids', None) if past_key_values: if self.config.multi_query: past_length = past_key_values[0].shape[1] else: past_length = past_key_values[0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -input_ids.shape[1]:] attention_mask = kwargs.get('attention_mask', None) position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] else: position_ids = None if inputs_embeds is not None and past_key_values is None: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids} model_inputs.update({'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'position_ids': position_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids}) return model_inputs def _get_initial_cache_position(self, input_ids, model_kwargs): past_length = 0 if 'past_key_values' in model_kwargs: if self.config.multi_query: past_length = model_kwargs['past_key_values'][0].shape[1] else: past_length = model_kwargs['past_key_values'][0].shape[2] if 'inputs_embeds' in model_kwargs: cur_len = model_kwargs['inputs_embeds'].shape[1] else: cur_len = input_ids.shape[-1] model_kwargs['cache_position'] = torch.arange(past_length, cur_len, device=input_ids.device) return model_kwargs @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: 
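# --- Illustrative sketch: GPTBigCode legacy cache layout (hedged annotation) ---
# `prepare_inputs_for_generation` above reads the cached length from
# `past_key_values[0].shape[1]` when `config.multi_query` is True and from
# `shape[2]` otherwise, because each layer stores one fused key/value tensor:
# (batch, past_len, 2 * head_dim) for multi-query attention versus
# (batch, num_heads, past_len, 2 * head_dim) for multi-head attention.
# A toy check with made-up sizes; no real model is involved.
def _example_gpt_bigcode_cache_layout():
    import torch

    batch, num_heads, past_len, head_dim = 2, 16, 7, 64
    mqa_cache = torch.zeros(batch, past_len, 2 * head_dim)             # multi_query=True
    mha_cache = torch.zeros(batch, num_heads, past_len, 2 * head_dim)  # multi_query=False
    assert mqa_cache.shape[1] == past_len
    assert mha_cache.shape[2] == past_len
    return past_len
# --- End sketch ---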
Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous().to(shift_logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions) @staticmethod def _reorder_cache(past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: return tuple((layer_past.index_select(0, beam_idx.to(layer_past.device)) for layer_past in past_key_values)) @add_start_docstrings('\n The GPTBigCode Model transformer with a sequence classification head on top (linear layer).\n\n [`GPTBigCodeForSequenceClassification`] uses the last token in order to do the classification, as other causal\n models (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', GPT_BIGCODE_START_DOCSTRING) class GPTBigCodeForSequenceClassification(GPTBigCodePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTBigCodeModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) self.post_init() @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: (batch_size, sequence_length) = input_ids.shape[:2] else: (batch_size, sequence_length) = inputs_embeds.shape[:2] assert self.config.pad_token_id is not None or batch_size == 1, 'Cannot handle batch sizes > 1 if no padding token is defined.' if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n GPT_BIGCODE Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.\n for Named-Entity-Recognition (NER) tasks.\n ', GPT_BIGCODE_START_DOCSTRING) class GPTBigCodeForTokenClassification(GPTBigCodePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTBigCodeModel(config) if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None: classifier_dropout = config.classifier_dropout elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), 
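# --- Illustrative sketch: pooling the last non-padding token (hedged annotation) ---
# The sequence-classification head above pools the logits of the last
# non-padding token in each row: the index of the first pad token minus one,
# taken modulo the sequence length so rows without padding fall back to the
# final position. A standalone reproduction with a made-up pad_token_id; the
# helper name is an illustrative assumption.
def _example_last_non_padding_index():
    import torch

    pad_token_id = 0
    input_ids = torch.tensor([[5, 6, 7, pad_token_id, pad_token_id],
                              [8, 9, 10, 11, 12]])  # second row has no padding
    sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
    sequence_lengths = sequence_lengths % input_ids.shape[-1]
    # -> tensor([2, 4]): position 2 in row 0, position 4 in row 1
    return sequence_lengths
# --- End sketch ---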
labels.view(-1).to(logits.device)) if not return_dict: output = (logits,) + transformer_outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) # File: transformers-main/src/transformers/models/gpt_neo/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _import_structure = {'configuration_gpt_neo': ['GPTNeoConfig', 'GPTNeoOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_gpt_neo'] = ['GPTNeoForCausalLM', 'GPTNeoForQuestionAnswering', 'GPTNeoForSequenceClassification', 'GPTNeoForTokenClassification', 'GPTNeoModel', 'GPTNeoPreTrainedModel', 'load_tf_weights_in_gpt_neo'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_gpt_neo'] = ['FlaxGPTNeoForCausalLM', 'FlaxGPTNeoModel', 'FlaxGPTNeoPreTrainedModel'] if TYPE_CHECKING: from .configuration_gpt_neo import GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/gpt_neo/configuration_gpt_neo.py """""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... 
import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging logger = logging.get_logger(__name__) class GPTNeoConfig(PretrainedConfig): model_type = 'gpt_neo' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[['global', 'local'], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function='gelu_new', resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_layers = num_layers self.num_heads = num_heads self.intermediate_size = intermediate_size self.window_size = window_size self.activation_function = activation_function self.resid_dropout = resid_dropout self.embed_dropout = embed_dropout self.attention_dropout = attention_dropout self.classifier_dropout = classifier_dropout self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.attention_types = attention_types self.attention_layers = self.expand_attention_types_params(attention_types) if len(self.attention_layers) != self.num_layers: raise ValueError(f'Configuration for convolutional module is incorrect. It is required that `len(config.attention_layers)` == `config.num_layers` but is `len(config.attention_layers) = {len(self.attention_layers)}`, `config.num_layers = {self.num_layers}`. `config.attention_layers` is prepared using `config.attention_types`. 
Please verify the value of `config.attention_types` argument.') super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) @staticmethod def expand_attention_types_params(attention_types): attentions = [] for item in attention_types: for _ in range(item[1]): attentions.extend(item[0]) return attentions def custom_unfold(input, dimension, size, step): import torch shape = input.size() rank = len(shape) sizedim = shape[dimension] low_indices = torch.arange(0, sizedim, step) min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1 indices = torch.arange(size) + low_indices[:min_length][:, None] s = [slice(None)] * rank s[dimension] = indices sliced = input[s] perm = list(range(0, rank + 1)) perm.append(perm.pop(dimension + 1)) return sliced.permute(perm) def custom_get_block_length_and_num_blocks(seq_length, window_size): import torch candidates = torch.arange(1, window_size) remainders = torch.remainder(seq_length, candidates) divisor_indices = remainders == 0 divisors = candidates[divisor_indices] largest_divisor = torch.max(divisors) return (largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')) class GPTNeoOnnxConfig(OnnxConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}}) if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'} else: common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'} return common_inputs @property def num_attention_heads(self) -> int: return self._config.num_heads def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']}) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, seqlen) = common_inputs['input_ids'].shape past_key_values_length = seqlen + 2 past_shape = (batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads) ordered_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)] ordered_inputs['attention_mask'] = common_inputs['attention_mask'] if self.use_past: mask_dtype = ordered_inputs['attention_mask'].dtype ordered_inputs['attention_mask'] = torch.cat([ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1) return ordered_inputs @property def default_onnx_opset(self) -> int: return 13 # File: transformers-main/src/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py """""" import argparse import json from transformers import GPTNeoConfig, GPTNeoForCausalLM, load_tf_weights_in_gpt_neo from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): config_json = json.load(open(config_file, 'r')) config = GPTNeoConfig(hidden_size=config_json['n_embd'], num_layers=config_json['n_layer'], num_heads=config_json['n_head'], attention_types=config_json['attention_types'], 
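# --- Illustrative sketch: expanding `attention_types` (hedged annotation) ---
# `GPTNeoConfig.attention_types` is a compressed per-layer pattern:
# [[["global", "local"], 12]] means "repeat the global/local pair 12 times",
# and `expand_attention_types_params` above unrolls it into one entry per
# layer, which must match `num_layers`. A standalone check mirroring that
# expansion; the helper name is an illustrative assumption.
def _example_expand_attention_types():
    attention_types = [[["global", "local"], 12]]  # the GPTNeoConfig default
    attention_layers = []
    for block, repeat in attention_types:
        for _ in range(repeat):
            attention_layers.extend(block)
    assert len(attention_layers) == 24
    assert attention_layers[:4] == ["global", "local", "global", "local"]
    return attention_layers
# --- End sketch ---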
max_position_embeddings=config_json['n_positions'], resid_dropout=config_json['res_dropout'], embed_dropout=config_json['embed_dropout'], attention_dropout=config_json['attn_dropout']) print(f'Building PyTorch model from configuration: {config}') model = GPTNeoForCausalLM(config) load_tf_weights_in_gpt_neo(model, config, tf_checkpoint_path) print(f'Save PyTorch model to {pytorch_dump_path}') model.save_pretrained(pytorch_dump_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.') parser.add_argument('--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained mesh-tf model. \nThis specifies the model architecture.') parser.add_argument('--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) # File: transformers-main/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py from functools import partial from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_gpt_neo import GPTNeoConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'GPTNeoConfig' _CHECKPOINT_FOR_DOC = 'EleutherAI/gpt-neo-1.3B' GPT_NEO_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a Flax Linen\n [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a\n regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`GPTNeoConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. 
Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' GPT_NEO_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):\n Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast\n auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class FlaxGPTNeoSelfAttention(nn.Module): config: GPTNeoConfig attention_type: str dtype: jnp.dtype = jnp.float32 def setup(self): config = self.config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.attn_dropout = nn.Dropout(config.attention_dropout) self.resid_dropout = nn.Dropout(config.resid_dropout) dense = partial(nn.Dense, self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) (self.q_proj, self.k_proj, self.v_proj) = (dense(use_bias=False), dense(use_bias=False), dense(use_bias=False)) self.out_proj = dense() self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype='bool'), dtype='bool') if self.attention_type == 'local': self.causal_mask = self.causal_mask ^ jnp.tril(self.causal_mask, -config.window_size) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') cached_key = self.variable('cache', 'cached_key', jnp.zeros, key.shape, key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape, value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, dtype=jnp.int32)) if is_initialized: (*batch_dims, max_length, num_heads, depth_per_head) = cached_key.value.shape cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors pad_mask = jnp.broadcast_to(jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) def __call__(self, hidden_states, attention_mask=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False): query = self.q_proj(hidden_states) * jnp.sqrt(self.head_dim).astype(self.dtype) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query) key = self._split_heads(key) value = self._split_heads(value) (query_length, key_length) = (query.shape[1], key.shape[1]) if self.has_variable('cache', 'cached_key'): mask_shift = self.variables['cache']['cache_index'] max_decoder_length = self.variables['cache']['cached_key'].shape[1] causal_mask = lax.dynamic_slice(self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] batch_size = hidden_states.shape[0] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) attention_mask = 
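# --- Illustrative sketch: the "local" attention band mask (hedged annotation) ---
# In `setup` above, the "local" attention type turns the full causal mask into a
# banded one via `causal_mask ^ jnp.tril(causal_mask, -window_size)`: XOR-ing
# away everything `window_size` or more positions below the diagonal leaves each
# query attending only to the most recent `window_size` keys. A small standalone
# demo; the helper name and toy sizes are illustrative assumptions.
def _example_local_attention_band(seq_len=6, window_size=2):
    import jax.numpy as jnp

    full = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))  # global causal mask
    band = full ^ jnp.tril(full, -window_size)                 # keep only the last `window_size` keys
    # row i of `band` is True for columns j with i - window_size < j <= i
    assert bool(band[5, 5]) and bool(band[5, 4]) and not bool(band[5, 3])
    return band
# --- End sketch ---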
jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) dropout_rng = None if not deterministic and self.config.attention_dropout > 0.0: dropout_rng = self.make_rng('dropout') if self.has_variable('cache', 'cached_key') or init_cache: (key, value, attention_mask) = self._concatenate_to_cache(key, value, query, attention_mask) attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) attn_weights = dot_product_attention_weights(query, key, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_dropout, deterministic=deterministic, dtype=self.dtype, precision=None) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output, deterministic=deterministic) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxGPTNeoAttention(nn.Module): config: GPTNeoConfig layer_id: int = 0 dtype: jnp.dtype = jnp.float32 def setup(self): attention_type = self.config.attention_layers[self.layer_id] self.attention = FlaxGPTNeoSelfAttention(self.config, attention_type, dtype=self.dtype) def __call__(self, hidden_states, attention_mask=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False): return self.attention(hidden_states, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions) class FlaxGPTNeoMLP(nn.Module): config: GPTNeoConfig intermediate_size: int dtype: jnp.dtype = jnp.float32 def setup(self): embed_dim = self.config.hidden_size kernel_init = jax.nn.initializers.normal(self.config.initializer_range) self.c_fc = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init) self.c_proj = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init) self.act = ACT2FN[self.config.activation_function] self.dropout = nn.Dropout(rate=self.config.resid_dropout) def __call__(self, hidden_states, deterministic: bool=True): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxGPTNeoBlock(nn.Module): config: GPTNeoConfig layer_id: int = 0 dtype: jnp.dtype = jnp.float32 def setup(self): hidden_size = self.config.hidden_size inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.attn = FlaxGPTNeoAttention(self.config, layer_id=self.layer_id, dtype=self.dtype) self.ln_2 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.mlp = FlaxGPTNeoMLP(self.config, inner_dim, dtype=self.dtype) def __call__(self, hidden_states, attention_mask=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False): residual = hidden_states hidden_states = self.ln_1(hidden_states) outputs = self.attn(hidden_states, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions) attn_output = outputs[0] hidden_states = attn_output + residual residual = hidden_states hidden_states = 
self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic) hidden_states = residual + feed_forward_hidden_states return (hidden_states,) + outputs[1:] class FlaxGPTNeoPreTrainedModel(FlaxPreTrainedModel): config_class = GPTNeoConfig base_model_prefix = 'transformer' module_class: nn.Module = None def __init__(self, config: GPTNeoConfig, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): input_ids = jnp.ones((batch_size, max_length)) attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True) return unfreeze(init_variables['cache']) @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) def __call__(self, input_ids, attention_mask=None, position_ids=None, params: dict=None, past_key_values: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict (batch_size, sequence_length) = input_ids.shape if position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `position_ids` when passing `past_key_values`.') position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False outputs = self.module.apply(inputs, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), jnp.array(position_ids, dtype='i4'), not train, False, output_attentions, output_hidden_states, return_dict, rngs=rngs, mutable=mutable) if past_key_values is not None and return_dict: (outputs, past_key_values) = outputs outputs['past_key_values'] = unfreeze(past_key_values['cache']) return outputs elif past_key_values is not None 
and (not return_dict): (outputs, past_key_values) = outputs outputs = outputs[:1] + (unfreeze(past_key_values['cache']),) + outputs[1:] return outputs class FlaxGPTNeoBlockCollection(nn.Module): config: GPTNeoConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.blocks = [FlaxGPTNeoBlock(self.config, layer_id=i, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] def __call__(self, hidden_states, attention_mask=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for block in self.blocks: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = block(hidden_states, attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) outputs = (hidden_states, all_hidden_states, all_attentions) return outputs class FlaxGPTNeoModule(nn.Module): config: GPTNeoConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embed_dim = self.config.hidden_size embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range) self.wte = nn.Embed(self.config.vocab_size, self.embed_dim, embedding_init=embedding_init) self.wpe = nn.Embed(self.config.max_position_embeddings, self.embed_dim, embedding_init=embedding_init) self.dropout = nn.Dropout(rate=self.config.embed_dropout) self.h = FlaxGPTNeoBlockCollection(self.config, dtype=self.dtype) self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) def __call__(self, input_ids, attention_mask, position_ids, deterministic=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): input_embeds = self.wte(input_ids.astype('i4')) position_embeds = self.wpe(position_ids.astype('i4')) hidden_states = input_embeds + position_embeds hidden_states = self.dropout(hidden_states, deterministic=deterministic) outputs = self.h(hidden_states, attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = outputs[1] + (hidden_states,) outputs = (hidden_states, all_hidden_states) + outputs[2:] else: outputs = (hidden_states,) + outputs[1:] if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=outputs[1], attentions=outputs[-1]) @add_start_docstrings('The bare GPTNeo Model transformer outputting raw hidden-states without any specific head on top.', GPT_NEO_START_DOCSTRING) class FlaxGPTNeoModel(FlaxGPTNeoPreTrainedModel): module_class = FlaxGPTNeoModule append_call_sample_docstring(FlaxGPTNeoModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC) class FlaxGPTNeoForCausalLMModule(nn.Module): config: GPTNeoConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.transformer = FlaxGPTNeoModule(self.config, dtype=self.dtype) self.lm_head = nn.Dense(self.config.vocab_size, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) def __call__(self, 
input_ids, attention_mask, position_ids, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.transformer(input_ids, attention_mask, position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_kernel = self.transformer.variables['params']['wte']['embedding'].T lm_logits = self.lm_head.apply({'params': {'kernel': shared_kernel}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n The GPTNeo Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', GPT_NEO_START_DOCSTRING) class FlaxGPTNeoForCausalLM(FlaxGPTNeoPreTrainedModel): module_class = FlaxGPTNeoForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array]=None): (batch_size, seq_length) = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'attention_mask': extended_attention_mask, 'position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['position_ids'] = model_kwargs['position_ids'][:, -1:] + 1 return model_kwargs append_call_sample_docstring(FlaxGPTNeoForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC) # File: transformers-main/src/transformers/models/gpt_neo/modeling_gpt_neo.py """""" import os from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_causal_attention_mask from ...modeling_outputs import BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import is_torch_greater_or_equal_than_1_13 from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, is_torch_fx_available, logging from .configuration_gpt_neo import GPTNeoConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward if is_torch_fx_available(): if not is_torch_greater_or_equal_than_1_13: import torch.fx _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask) logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 
'GPTNeoConfig' _CHECKPOINT_FOR_DOC = 'EleutherAI/gpt-neo-1.3B' def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask def load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path): try: import re import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(gpt_neo_checkpoint_path) logger.info(f'Converting TensorFlow checkpoint from {tf_path}') init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: if 'global_step' not in name and 'adam' not in name: array = tf.train.load_variable(tf_path, name) array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy() name = name.replace('attn/q', 'attn/attention/q_proj/w') name = name.replace('attn/k', 'attn/attention/k_proj/w') name = name.replace('attn/v', 'attn/attention/v_proj/w') name = name.replace('attn/o', 'attn/attention/out_proj/w') name = name.replace('norm_1', 'ln_1') name = name.replace('norm_2', 'ln_2') name = name.replace('attn/compute_output_bias/o_b', 'attn/attention/out_proj/b') name = name.replace('conv1d_main/c_fc/kernel', 'c_fc/w') name = name.replace('conv1d_main/c_fc/bias', 'c_fc/b') name = name.replace('conv1d_main/c_proj/kernel', 'c_proj/w') name = name.replace('conv1d_main/c_proj/bias', 'c_proj/b') names.append(name) arrays.append(array) for (name, array) in zip(names, arrays): name = name[5:] name = name.split('/') pointer = model.transformer for m_name in name: if re.fullmatch('[A-Za-z]+\\d+', m_name): scope_names = re.split('(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'w' or scope_names[0] == 'g': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'b': pointer = getattr(pointer, 'bias') elif scope_names[0] == 'wpe' or scope_names[0] == 'wte': pointer = getattr(pointer, scope_names[0]) pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, scope_names[0]) if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if name[-1] == 'w' and name[-2] in ['out_proj', 'k_proj', 'q_proj', 'v_proj', 'c_proj', 'c_fc']: array = array.transpose() if name == ['wte']: array = array[:config.vocab_size] if pointer.shape != array.shape: raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}') print(f'Initialize PyTorch weight {name}') pointer.data = torch.from_numpy(array) embs = model.transformer.wte.weight lin = nn.Linear(embs.size()[1], 
embs.size()[0], bias=False) lin.weight = embs model.set_output_embeddings(lin) return model class GPTNeoSelfAttention(nn.Module): def __init__(self, config, attention_type, layer_id=None): super().__init__() self.config = config max_positions = config.max_position_embeddings bias = torch.tril(torch.ones((max_positions, max_positions), dtype=bool)).view(1, 1, max_positions, max_positions) if attention_type == 'local': bias = torch.bitwise_xor(bias, torch.tril(bias, -config.window_size)) self.register_buffer('bias', bias, persistent=False) self.register_buffer('masked_bias', torch.tensor(-1000000000.0), persistent=False) self.attn_dropout = nn.Dropout(float(config.attention_dropout)) self.resid_dropout = nn.Dropout(float(config.resid_dropout)) self.is_causal = True self.layer_id = layer_id self.embed_dim = config.hidden_size self.num_heads = config.num_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) def _split_heads(self, tensor, num_heads, attn_head_size): new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) return tensor.permute(0, 2, 1, 3) def _merge_heads(self, tensor, num_heads, attn_head_size): tensor = tensor.permute(0, 2, 1, 3).contiguous() new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) return tensor.view(new_shape) def _attn(self, query, key, value, attention_mask=None, head_mask=None): query = query.to(torch.float32) key = key.to(torch.float32) attn_weights = torch.matmul(query, key.transpose(-1, -2)) (query_length, key_length) = (query.size(-2), key.size(-2)) causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device) attn_weights = torch.where(causal_mask, attn_weights, mask_value) if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.to(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return (attn_output, attn_weights) def forward(self, hidden_states, attention_mask=None, layer_past=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None): query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if layer_past is not None: cache_kwargs = {'cache_position': cache_position} (key, value) = layer_past.update(key, value, self.layer_id, cache_kwargs) (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = 
self.resid_dropout(attn_output) outputs = (attn_output, layer_past) if output_attentions: outputs += (attn_weights,) return outputs class GPTNeoFlashAttention2(GPTNeoSelfAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states, attention_mask=None, layer_past=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None): (bsz, _, _) = hidden_states.size() query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if layer_past is not None: cache_kwargs = {'cache_position': cache_position} (key, value) = layer_past.update(key, value, self.layer_id, cache_kwargs) query_length = query.shape[2] tgt_len = key.shape[2] query = query.transpose(1, 2).view(bsz, query_length, self.num_heads, self.head_dim) key = key.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) value = value.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) attn_dropout = self.config.attention_dropout if self.training else 0.0 if attention_mask is not None: attention_mask = attention_mask[:, :, :, :key.shape[-2]] if query.dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query = query.to(target_dtype) key = key.to(target_dtype) value = value.to(target_dtype) attn_output = _flash_attention_forward(query, key, value, attention_mask, query_length, dropout=attn_dropout, softmax_scale=1.0, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_weights_reshaped = attn_output.reshape(bsz, query_length, self.num_heads * self.head_dim) attn_output = self.out_proj(attn_weights_reshaped) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, layer_past) if output_attentions: outputs += (attn_weights_reshaped,) return outputs GPT_NEO_ATTENTION_CLASSES = {'eager': GPTNeoSelfAttention, 'flash_attention_2': GPTNeoFlashAttention2} class GPTNeoAttention(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.layer_id = layer_id self.attention_layers = config.attention_layers self.attention_type = self.attention_layers[layer_id] if self.attention_type in ['global', 'local']: self.attention = GPT_NEO_ATTENTION_CLASSES[config._attn_implementation](config, self.attention_type, layer_id) else: raise NotImplementedError(f"Only attn layer types 'global' and 'local' exist, but got `config.attention_layers`: {config.attention_layers}. 
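# A minimal, hedged sketch of how the per-layer attention types above are typically configured:
# GPTNeoConfig.attention_types expands into config.attention_layers (one 'global'/'local' entry
# per block), which GPTNeoAttention reads by layer_id, while GPT_NEO_ATTENTION_CLASSES switches
# between the eager and flash-attention-2 implementations. Toy sizes below are illustrative.
from transformers import GPTNeoConfig

config = GPTNeoConfig(num_layers=4, attention_types=[[["global", "local"], 2]])
print(config.attention_layers)  # expected: ['global', 'local', 'global', 'local']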
Select attn layer types from ['global', 'local'] only.") def forward(self, hidden_states, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None): return self.attention(hidden_states, attention_mask=attention_mask, layer_past=layer_past, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) class GPTNeoMLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = nn.Linear(embed_dim, intermediate_size) self.c_proj = nn.Linear(intermediate_size, embed_dim) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(float(config.resid_dropout)) def forward(self, hidden_states): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class GPTNeoBlock(nn.Module): def __init__(self, config, layer_id=None): super().__init__() hidden_size = config.hidden_size inner_dim = config.intermediate_size if config.intermediate_size is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPTNeoAttention(config, layer_id) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPTNeoMLP(inner_dim, config) def forward(self, hidden_states, layer_past=None, attention_mask=None, head_mask=None, use_cache=False, output_attentions=False, cache_position=None): residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) attn_output = attn_outputs[0] outputs = attn_outputs[1:] hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = residual + feed_forward_hidden_states if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs class GPTNeoPreTrainedModel(PreTrainedModel): config_class = GPTNeoConfig load_tf_weights = load_tf_weights_in_gpt_neo base_model_prefix = 'transformer' supports_gradient_checkpointing = True _no_split_modules = ['GPTNeoBlock'] _skip_keys_device_placement = 'past_key_values' _supports_flash_attn_2 = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = False def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): if isinstance(module, (nn.Linear,)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) GPT_NEO_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. 
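# A hedged end-to-end sketch for the causal-LM variant declared further below
# (GPTNeoForCausalLM): load pretrained weights via the inherited from_pretrained, compute the
# shifted-label loss by passing labels=input_ids, and greedily generate a few tokens.
# The 125M checkpoint name is used for illustration only.
import torch
from transformers import AutoTokenizer, GPTNeoForCausalLM

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125m")
model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125m")

inputs = tokenizer("GPT-Neo ties its LM head to the input embeddings.", return_tensors="pt")
with torch.no_grad():
    loss = model(**inputs, labels=inputs["input_ids"]).loss  # logits and labels are shifted internally
generated = model.generate(inputs["input_ids"], max_new_tokens=10, do_sample=False)
print(float(loss), tokenizer.decode(generated[0], skip_special_tokens=True))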
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`GPTNeoConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GPT_NEO_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past_key_values` is `None` else\n `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input\n sequence tokens in the vocabulary.\n\n If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance, see our\n [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
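# A hedged usage sketch for the inputs documented here, applied to the bare GPTNeoModel defined
# just below: tokenize a prompt, run a forward pass, and read back the final hidden states.
# The 125M checkpoint name is used for illustration.
import torch
from transformers import AutoTokenizer, GPTNeoModel

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125m")
model = GPTNeoModel.from_pretrained("EleutherAI/gpt-neo-125m")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, config.hidden_size)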
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see\n `past_key_values`).\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. It is used to update the cache in the correct position and to infer\n the complete sequence length.\n" @add_start_docstrings('The bare GPT Neo Model transformer outputting raw hidden-states without any specific head on top.', GPT_NEO_START_DOCSTRING) class GPTNeoModel(GPTNeoPreTrainedModel): def __init__(self, config): super().__init__(config) self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(float(config.embed_dropout)) self.h = nn.ModuleList([GPTNeoBlock(config, layer_id=i) for i in range(config.num_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Union[Cache, Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None 
else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False if inputs_embeds is None: inputs_embeds = self.wte(input_ids) use_legacy_cache = False if use_cache and (not isinstance(past_key_values, Cache)) and (not self.training): use_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) if not self.training: logger.warning_once('We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.45. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/internal/generation_utils#transformers.Cache)') seq_length = inputs_embeds.shape[1] if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) head_mask = self.get_head_mask(head_mask, self.config.num_layers) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, seq_length) token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = (-1, seq_length, hidden_states.size(-1)) next_decoder_cache = None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, block) in enumerate(self.h): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, None, causal_mask, head_mask[i], use_cache, output_attentions, cache_position) else: outputs = block(hidden_states, layer_past=past_key_values, attention_mask=causal_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = outputs[0] if use_cache: next_decoder_cache = outputs[1] if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attentions) def 
_update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions): causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @add_start_docstrings('\n The GPT Neo Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', GPT_NEO_START_DOCSTRING) class GPTNeoForCausalLM(GPTNeoPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.transformer = GPTNeoModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, past_key_values=None, inputs_embeds=None, cache_position=None, use_cache=True, **kwargs): if past_key_values is not None: if inputs_embeds is not None: input_ids = input_ids[:, -cache_position.shape[0]:] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if token_type_ids is not None: token_type_ids = token_type_ids[:, -input_ids.shape[1]:] if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] position_ids = position_ids.clone(memory_format=torch.contiguous_format) if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None} else: model_inputs = {'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None} if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2: if model_inputs['inputs_embeds'] is not None: (batch_size, sequence_length, _) = model_inputs['inputs_embeds'].shape device = model_inputs['inputs_embeds'].device else: (batch_size, sequence_length) = model_inputs['input_ids'].shape device = 
model_inputs['input_ids'].device dtype = self.lm_head.weight.dtype min_dtype = torch.finfo(dtype).min attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_length(), dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=batch_size) model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}) return model_inputs @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Union[Cache, Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: labels = labels.to(lm_logits.device) lm_logits = lm_logits.to(torch.float32) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) lm_logits = lm_logits.to(hidden_states.dtype) loss = loss.to(hidden_states.dtype) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @staticmethod def _reorder_cache(past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past_key_values)) @add_start_docstrings('\n The GPTNeo Model transformer with a sequence classification head on top (linear layer).\n\n [`GPTNeoForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. 
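# A hedged, self-contained illustration of the pooling index this docstring describes and the
# forward pass below computes: the position of the last non-padding token per row (first pad
# position minus one, with a modulo so fully unpadded rows fall back to the final position).
import torch

pad_token_id = 0  # toy value; real checkpoints configure this on the model config
input_ids = torch.tensor([[11, 12, 13, 0, 0],
                          [21, 22, 23, 24, 25]])
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 4]): the hidden states at these positions feed the score head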
If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', GPT_NEO_START_DOCSTRING) class GPTNeoForSequenceClassification(GPTNeoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTNeoModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) self.post_init() @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Union[Cache, Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: (batch_size, sequence_length) = input_ids.shape[:2] else: (batch_size, sequence_length) = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
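# A hedged usage sketch for GPTNeoForSequenceClassification: GPT-Neo tokenizers ship without a
# pad token, so eos is reused here so that the last-non-padding pooling can work on a padded
# batch. The classification head is newly initialized, so the logits are untrained; the 125M
# checkpoint and num_labels=2 are illustrative choices.
import torch
from transformers import AutoTokenizer, GPTNeoForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125m")
tokenizer.pad_token = tokenizer.eos_token
model = GPTNeoForSequenceClassification.from_pretrained("EleutherAI/gpt-neo-125m", num_labels=2)
model.config.pad_token_id = tokenizer.pad_token_id

batch = tokenizer(["short example", "a slightly longer example sentence"], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**batch).logits
print(logits.shape)  # (2, 2): one row of pooled class logits per sequence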
Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n GPT Neo model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n ', GPT_NEO_START_DOCSTRING) class GPTNeoForTokenClassification(GPTNeoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTNeoModel(config) self.dropout = nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint='EleutherAI/gpt-neo-125m', output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_loss=0.25) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[2:] return (loss,) + output if loss is not None 
else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n The GPT-Neo Model transformer with a span classification head on top for extractive question-answering tasks like\n SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', GPT_NEO_START_DOCSTRING) class GPTNeoForQuestionAnswering(GPTNeoPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTNeoModel(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.post_init() @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, real_checkpoint=_CHECKPOINT_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/gpt_neox/__init__.py from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable _import_structure = {'configuration_gpt_neox': ['GPTNeoXConfig']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast'] try: if not 
is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_gpt_neox'] = ['GPTNeoXForCausalLM', 'GPTNeoXForQuestionAnswering', 'GPTNeoXForSequenceClassification', 'GPTNeoXForTokenClassification', 'GPTNeoXLayer', 'GPTNeoXModel', 'GPTNeoXPreTrainedModel'] if TYPE_CHECKING: from .configuration_gpt_neox import GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/gpt_neox/configuration_gpt_neox.py """""" from ...configuration_utils import PretrainedConfig from ...modeling_rope_utils import rope_config_validation from ...utils import logging logger = logging.get_logger(__name__) class GPTNeoXConfig(PretrainedConfig): model_type = 'gpt_neox' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act='gelu', rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, attention_bias=True, **kwargs): super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.rotary_pct = rotary_pct self.partial_rotary_factor = rotary_pct self.rotary_emb_base = rotary_emb_base self.rope_theta = rotary_emb_base self.attention_dropout = attention_dropout self.hidden_dropout = hidden_dropout self.classifier_dropout = classifier_dropout self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.tie_word_embeddings = tie_word_embeddings self.use_parallel_residual = use_parallel_residual self.rope_scaling = rope_scaling self.attention_bias = attention_bias if self.rope_scaling is not None and 'type' in self.rope_scaling: self.rope_scaling['rope_type'] = self.rope_scaling['type'] rope_config_validation(self) if self.hidden_size % self.num_attention_heads != 0: raise ValueError('The hidden size is not divisible by the number of attention heads! 
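# A hedged sketch of the divisibility check above: the per-head size is
# hidden_size // num_attention_heads, so the two must divide evenly. Toy sizes, not defaults.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(hidden_size=768, num_attention_heads=12, num_hidden_layers=6)
print(config.hidden_size // config.num_attention_heads)  # 64, the per-head dimension
# GPTNeoXConfig(hidden_size=100, num_attention_heads=12) would raise the ValueError above.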
Make sure to update them!') # File: transformers-main/src/transformers/models/gpt_neox/modeling_gpt_neox.py """""" from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS from ...modeling_utils import PreTrainedModel from ...utils import get_torch_version, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging from .configuration_gpt_neox import GPTNeoXConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'trl-internal-testing/tiny-random-GPTNeoXForCausalLM' _REAL_CHECKPOINT_FOR_DOC = 'EleutherAI/gpt-neox-20b' _CONFIG_FOR_DOC = 'GPTNeoXConfig' def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask class GPTNeoXPreTrainedModel(PreTrainedModel): config_class = GPTNeoXConfig base_model_prefix = 'gpt_neox' supports_gradient_checkpointing = True _no_split_modules = ['GPTNeoXLayer'] _skip_keys_device_placement = 'past_key_values' _supports_flash_attn_2 = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = True _supports_sdpa = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class GPTNeoXAttention(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() self.config = config self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size if self.hidden_size % self.num_attention_heads != 0: raise ValueError('The hidden size is not divisble by 
the number of attention heads! Make sure to update them') self.head_size = self.hidden_size // self.num_attention_heads self.rotary_ndims = int(self.head_size * config.rotary_pct) self.rope_theta = config.rotary_emb_base self._init_bias(config.max_position_embeddings) self.register_buffer('masked_bias', torch.tensor(-1000000000.0), persistent=False) self.rotary_emb = GPTNeoXRotaryEmbedding(config=self.config) if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.norm_factor = self.head_size ** (-0.5) self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.attention_bias) self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias) self.attention_dropout = nn.Dropout(config.attention_dropout) self.is_causal = True self.layer_idx = layer_idx def _init_bias(self, max_positions, device=None): self.register_buffer('bias', torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(1, 1, max_positions, max_positions), persistent=False) if device is not None: self.bias = self.bias.to(device) def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, position_ids: torch.LongTensor, head_mask: Optional[torch.FloatTensor]=None, layer_past: Optional[Cache]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, padding_mask: Optional[torch.Tensor]=None, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None): (query, key, value, present) = self._attn_projections_and_rope(hidden_states=hidden_states, position_ids=position_ids, layer_past=layer_past, use_cache=use_cache, position_embeddings=position_embeddings) (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size) attn_output = self.dense(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs @classmethod def _split_heads(cls, tensor, num_attention_heads, attn_head_size): new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) tensor = tensor.view(new_shape) tensor = tensor.permute(0, 2, 1, 3) return tensor @classmethod def _merge_heads(cls, tensor, num_attention_heads, attn_head_size): tensor = tensor.permute(0, 2, 1, 3).contiguous() tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size) return tensor def _attn_projections_and_rope(self, hidden_states: torch.FloatTensor, position_ids: torch.LongTensor, layer_past: Optional[Tuple[torch.Tensor]]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None): qkv = self.query_key_value(hidden_states) new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size) qkv = qkv.view(*new_qkv_shape) query = qkv[..., :self.head_size].permute(0, 2, 1, 3) key = qkv[..., self.head_size:2 * self.head_size].permute(0, 2, 1, 3) value = qkv[..., 2 * self.head_size:].permute(0, 2, 1, 3) query_rot = query[..., :self.rotary_ndims] query_pass = query[..., self.rotary_ndims:] key_rot = key[..., :self.rotary_ndims] key_pass = key[..., self.rotary_ndims:] if position_embeddings is 
None: logger.warning_once('The attention layers in this model are transitioning from computing the RoPE embeddings internally through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed `position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be removed and `position_embeddings` will be mandatory.') (cos, sin) = self.rotary_emb(value, position_ids) else: (cos, sin) = position_embeddings (query, key) = apply_rotary_pos_emb(query_rot, key_rot, cos, sin) query = torch.cat((query, query_pass), dim=-1) key = torch.cat((key, key_pass), dim=-1) if layer_past is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'partial_rotation_size': self.rotary_ndims, 'cache_position': cache_position} (key, value) = layer_past.update(key, value, self.layer_idx, cache_kwargs) return (query, key, value, layer_past) def _attn(self, query, key, value, attention_mask=None, head_mask=None): (batch_size, num_attention_heads, query_length, attn_head_size) = query.size() key_length = key.size(-2) if key_length > self.bias.shape[-1]: self._init_bias(key_length, device=key.device) causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length] query = query.view(batch_size * num_attention_heads, query_length, attn_head_size) key = key.view(batch_size * num_attention_heads, key_length, attn_head_size) attn_scores = torch.zeros(batch_size * num_attention_heads, query_length, key_length, dtype=query.dtype, device=key.device) attn_scores = torch.baddbmm(attn_scores, query, key.transpose(1, 2), beta=1.0, alpha=self.norm_factor) attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length) mask_value = torch.finfo(attn_scores.dtype).min mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device) attn_scores = torch.where(causal_mask, attn_scores, mask_value) if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key.shape[-2]] attn_scores = attn_scores + causal_mask attn_weights = nn.functional.softmax(attn_scores, dim=-1) attn_weights = attn_weights.to(value.dtype) if head_mask is not None: attn_weights = attn_weights * head_mask attn_weights = self.attention_dropout(attn_weights) attn_output = torch.matmul(attn_weights, value) return (attn_output, attn_weights) class GPTNeoXFlashAttention2(GPTNeoXAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, position_ids: torch.LongTensor, head_mask: Optional[torch.FloatTensor]=None, layer_past: Optional[Cache]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None): (query, key, value, present) = self._attn_projections_and_rope(hidden_states=hidden_states, position_ids=position_ids, layer_past=layer_past, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings) query_length = query.shape[-2] target_dtype = value.dtype if query.dtype != target_dtype: query = query.to(target_dtype) if key.dtype != target_dtype: key = key.to(target_dtype) query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) input_dtype = query.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = 
torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.query_key_value.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query = query.to(target_dtype) key = key.to(target_dtype) value = value.to(target_dtype) attention_dropout = self.config.attention_dropout if self.training else 0.0 attn_weights = _flash_attention_forward(query, key, value, attention_mask, query_length, dropout=attention_dropout, softmax_scale=self.norm_factor, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_output = attn_weights.reshape(attn_weights.shape[0], attn_weights.shape[1], self.num_attention_heads * self.head_size) attn_output = self.dense(attn_output) outputs = (attn_output, layer_past) if output_attentions: outputs += (attn_weights,) return outputs class GPTNeoXSdpaAttention(GPTNeoXAttention): def __init__(self, config, layer_idx=None): super().__init__(config, layer_idx=layer_idx) self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse('2.2.0') def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, position_ids: torch.LongTensor, head_mask: Optional[torch.FloatTensor]=None, layer_past: Optional[Tuple[torch.Tensor]]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None): if output_attentions or head_mask is not None: logger.warning_once('`GPTNeoXSdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
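# A hedged sketch of choosing between the attention implementations in this file (eager,
# flash_attention_2, or the SDPA variant defined here) at load time; the tiny test checkpoint
# name comes from _CHECKPOINT_FOR_DOC above.
from transformers import GPTNeoXForCausalLM

model = GPTNeoXForCausalLM.from_pretrained(
    "trl-internal-testing/tiny-random-GPTNeoXForCausalLM",
    attn_implementation="sdpa",  # "eager" and "flash_attention_2" are the other registered keys
)
print(model.config._attn_implementation)  # expected: 'sdpa'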
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, layer_past=layer_past, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) (bsz, q_len, _) = hidden_states.size() (query, key, value, present) = self._attn_projections_and_rope(hidden_states=hidden_states, position_ids=position_ids, layer_past=layer_past, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, :key.shape[-2]] target_dtype = value.dtype if query.dtype != target_dtype: query = query.to(target_dtype) if key.dtype != target_dtype: key = key.to(target_dtype) if self.require_contiguous_qkv and query.device.type == 'cuda' and (attention_mask is not None): query = query.contiguous() key = key.contiguous() value = value.contiguous() is_causal = True if causal_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention(query=query, key=key, value=value, attn_mask=causal_mask, dropout_p=self.attention_dropout.p if self.training else 0.0, is_causal=is_causal) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, self.hidden_size) attn_output = self.dense(attn_output) return (attn_output, present, None) def attention_mask_func(attention_scores, ltor_mask): attention_scores.masked_fill_(~ltor_mask, torch.finfo(attention_scores.dtype).min) return attention_scores class GPTNeoXRotaryEmbedding(nn.Module): def __init__(self, dim=None, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, rope_type='default', config: Optional[GPTNeoXConfig]=None): super().__init__() self.rope_kwargs = {} if config is None: logger.warning_once('`GPTNeoXRotaryEmbedding` can now be fully parameterized by passing the model config through the `config` argument. 
All other arguments will be removed in v4.46') self.rope_kwargs = {'rope_type': rope_type, 'factor': scaling_factor, 'dim': dim, 'base': base, 'max_position_embeddings': max_position_embeddings} self.rope_type = rope_type self.max_seq_len_cached = max_position_embeddings self.original_max_seq_len = max_position_embeddings else: if config.rope_scaling is not None: self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type')) else: self.rope_type = 'default' self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] (inv_freq, self.attention_scaling) = self.rope_init_fn(self.config, device, **self.rope_kwargs) self.register_buffer('inv_freq', inv_freq, persistent=False) self.original_inv_freq = self.inv_freq def _dynamic_frequency_update(self, position_ids, device): seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: (inv_freq, self.attention_scaling) = self.rope_init_fn(self.config, device, seq_len=seq_len, **self.rope_kwargs) self.register_buffer('inv_freq', inv_freq, persistent=False) self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: self.register_buffer('inv_freq', self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, position_ids): if 'dynamic' in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() cos = cos * self.attention_scaling sin = sin * self.attention_scaling return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)) class GPTNeoXLinearScalingRotaryEmbedding(GPTNeoXRotaryEmbedding): def __init__(self, *args, **kwargs): logger.warning_once('`GPTNeoXLinearScalingRotaryEmbedding` is deprecated and will be removed in v4.46. Please use `GPTNeoXRotaryEmbedding`, which now also does linear scaling (simply pass the model config to __init__).') kwargs['rope_type'] = 'linear' super().__init__(*args, **kwargs) class GPTNeoXDynamicNTKScalingRotaryEmbedding(GPTNeoXRotaryEmbedding): def __init__(self, *args, **kwargs): logger.warning_once('`GPTNeoXDynamicNTKScalingRotaryEmbedding` is deprecated and will be removed in v4.46. 
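# A hedged sketch of the replacement that these deprecation warnings point to: rather than the
# scaling subclasses, put a rope_scaling dict on the config and let GPTNeoXRotaryEmbedding
# dispatch through ROPE_INIT_FUNCTIONS. The exact dict keys follow the shared rope-utils
# validation; toy model sizes are illustrative.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(
    hidden_size=768,
    num_attention_heads=12,
    num_hidden_layers=6,
    max_position_embeddings=2048,
    rope_scaling={"rope_type": "dynamic", "factor": 2.0},
)
print(config.rope_scaling)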
class GPTNeoXLinearScalingRotaryEmbedding(GPTNeoXRotaryEmbedding): def __init__(self, *args, **kwargs): logger.warning_once('`GPTNeoXLinearScalingRotaryEmbedding` is deprecated and will be removed in v4.46. Please use `GPTNeoXRotaryEmbedding`, which now also does linear scaling (simply pass the model config to __init__).') kwargs['rope_type'] = 'linear' super().__init__(*args, **kwargs) class GPTNeoXDynamicNTKScalingRotaryEmbedding(GPTNeoXRotaryEmbedding): def __init__(self, *args, **kwargs): logger.warning_once('`GPTNeoXDynamicNTKScalingRotaryEmbedding` is deprecated and will be removed in v4.46. Please use `GPTNeoXRotaryEmbedding`, which now also does dynamic NTK scaling (simply pass the model config to __init__).') kwargs['rope_type'] = 'dynamic' super().__init__(*args, **kwargs) def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = q * cos + rotate_half(q) * sin k_embed = k * cos + rotate_half(k) * sin return (q_embed, k_embed) class GPTNeoXMLP(nn.Module): def __init__(self, config): super().__init__() self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size) self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size) self.act = ACT2FN[config.hidden_act] def forward(self, hidden_states): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dense_4h_to_h(hidden_states) return hidden_states GPT_NEOX_ATTENTION_CLASSES = {'eager': GPTNeoXAttention, 'flash_attention_2': GPTNeoXFlashAttention2, 'sdpa': GPTNeoXSdpaAttention} class GPTNeoXLayer(nn.Module): def __init__(self, config, layer_idx): super().__init__() self.use_parallel_residual = config.use_parallel_residual self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_attention_dropout = nn.Dropout(config.hidden_dropout) self.post_mlp_dropout = nn.Dropout(config.hidden_dropout) self.attention = GPT_NEOX_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) self.mlp = GPTNeoXMLP(config) def forward(self, hidden_states: Optional[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, layer_past: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None): attention_layer_outputs = self.attention(self.input_layernorm(hidden_states), attention_mask=attention_mask, position_ids=position_ids, layer_past=layer_past, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, position_embeddings=position_embeddings) attn_output = attention_layer_outputs[0] attn_output = self.post_attention_dropout(attn_output) outputs = attention_layer_outputs[1:] if self.use_parallel_residual: mlp_output = self.mlp(self.post_attention_layernorm(hidden_states)) mlp_output = self.post_mlp_dropout(mlp_output) hidden_states = mlp_output + attn_output + hidden_states else: attn_output = attn_output + hidden_states mlp_output = self.mlp(self.post_attention_layernorm(attn_output)) mlp_output = self.post_mlp_dropout(mlp_output) hidden_states = mlp_output + attn_output if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs GPT_NEOX_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. 
Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`~GPTNeoXConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GPT_NEOX_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance, see our\n [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. It is used to update the cache in the correct position and to infer\n the complete sequence length.\n" @add_start_docstrings('The bare GPTNeoX Model transformer outputting raw hidden-states without any specific head on top.', GPT_NEOX_START_DOCSTRING) class GPTNeoXModel(GPTNeoXPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size) self.emb_dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([GPTNeoXLayer(config, i) for i in range(config.num_hidden_layers)]) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.rotary_emb = GPTNeoXRotaryEmbedding(config=config) self._attn_implementation = config._attn_implementation self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embed_in def set_input_embeddings(self, value): self.embed_in = value @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_in(input_ids) use_legacy_cache = False if use_cache and (not isinstance(past_key_values, Cache)): use_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) if not self.training: logger.warning_once('We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.45. 
Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/internal/generation_utils#transformers.Cache)') seq_length = inputs_embeds.shape[1] if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) hidden_states = self.emb_dropout(inputs_embeds) position_embeddings = self.rotary_emb(hidden_states, position_ids) next_decoder_cache = None all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func(layer.__call__, hidden_states, causal_mask, position_ids, head_mask[i], use_cache, None, output_attentions, cache_position, position_embeddings) else: outputs = layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, head_mask=head_mask[i], layer_past=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, position_embeddings=position_embeddings) hidden_states = outputs[0] if use_cache is True: next_decoder_cache = outputs[1] if output_attentions: all_attentions = all_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.final_layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_attentions] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_attentions) def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, 
cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions): causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @add_start_docstrings('GPTNeoX Model with a `language modeling` head on top for CLM fine-tuning.', GPT_NEOX_START_DOCSTRING) class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel): _tied_weights_keys = ['embed_out.weight'] def __init__(self, config): super().__init__(config) self.gpt_neox = GPTNeoXModel(config) self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_output_embeddings(self): return self.embed_out def set_output_embeddings(self, new_embeddings): self.embed_out = new_embeddings @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, CausalLMOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.gpt_neox(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] lm_logits = self.embed_out(hidden_states) lm_loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithPast(loss=lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs): if past_key_values is not None: if inputs_embeds is not None: input_ids = input_ids[:, -cache_position.shape[0]:] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] position_ids = position_ids.clone(memory_format=torch.contiguous_format) if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None} else: model_inputs = 
{'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None} if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2: if model_inputs['inputs_embeds'] is not None: (batch_size, sequence_length, _) = model_inputs['inputs_embeds'].shape device = model_inputs['inputs_embeds'].device else: (batch_size, sequence_length) = model_inputs['input_ids'].shape device = model_inputs['input_ids'].device dtype = self.embed_out.weight.dtype min_dtype = torch.finfo(dtype).min attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_length(), dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=batch_size) model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask}) return model_inputs def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])) + layer_past[2:],) return reordered_past @add_start_docstrings('\n The GPTNeoX Model transformer with a sequence classification head on top (linear layer).\n\n [`GPTNeoXForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', GPT_NEOX_START_DOCSTRING) class GPTNeoXForSequenceClassification(GPTNeoXPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.gpt_neox = GPTNeoXModel(config) self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) self.post_init() @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.gpt_neox(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] logits = self.score(hidden_states) if input_ids is not None: (batch_size, sequence_length) = input_ids.shape[:2] else: (batch_size, sequence_length) = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class GPTNeoXForTokenClassification(GPTNeoXPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.gpt_neox = GPTNeoXModel(config) self.dropout = nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint='LarsJonasson/pythia-410m-deduped-sft-swedish', output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_loss=0.25) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, TokenClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.gpt_neox(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n The GPT-NeoX Model transformer with a span classification head on top for extractive question-answering tasks 
like\n SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', GPT_NEOX_START_DOCSTRING) class GPTNeoXForQuestionAnswering(GPTNeoXPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.gpt_neox = GPTNeoXModel(config) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.post_init() @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.gpt_neox(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/gpt_neox/tokenization_gpt_neox_fast.py """""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} class GPTNeoXTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', pad_token=None, add_bos_token=False, add_eos_token=False, 
add_prefix_space=False, **kwargs): super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, add_prefix_space=add_prefix_space, **kwargs) self._add_bos_token = add_bos_token self._add_eos_token = add_eos_token self.update_post_processor() pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space: pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type')) pre_tok_state['add_prefix_space'] = add_prefix_space self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) self.add_prefix_space = add_prefix_space @property def add_eos_token(self): return self._add_eos_token @property def add_bos_token(self): return self._add_bos_token @add_eos_token.setter def add_eos_token(self, value): self._add_eos_token = value self.update_post_processor() @add_bos_token.setter def add_bos_token(self, value): self._add_bos_token = value self.update_post_processor() def update_post_processor(self): bos = self.bos_token bos_token_id = self.bos_token_id if bos is None and self.add_bos_token: raise ValueError('add_bos_token = True but bos_token = None') eos = self.eos_token eos_token_id = self.eos_token_id if eos is None and self.add_eos_token: raise ValueError('add_eos_token = True but eos_token = None') single = f"{(bos + ':0 ' if self.add_bos_token else '')}$A:0{(' ' + eos + ':0' if self.add_eos_token else '')}" pair = f"{single}{(' ' + bos + ':1' if self.add_bos_token else '')} $B:1{(' ' + eos + ':1' if self.add_eos_token else '')}" special_tokens = [] if self.add_bos_token: special_tokens.append((bos, bos_token_id)) if self.add_eos_token: special_tokens.append((eos, eos_token_id)) self._tokenizer.post_processor = processors.TemplateProcessing(single=single, pair=pair, special_tokens=special_tokens) def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + [0] * len(token_ids_0) + eos_token_id return bos_token_id + [0] * len(token_ids_0) + eos_token_id + bos_token_id + [0] * len(token_ids_1) + eos_token_id def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
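# --- Editor's note: the function below is an illustrative sketch, not part of the original file. ---
# It shows how the `add_bos_token` / `add_eos_token` setters above interact with
# `update_post_processor()`: toggling either property rebuilds the TemplateProcessing
# post-processor, so the next encode adds (or drops) the special tokens. The checkpoint name is
# only an example of a repository that ships a GPT-NeoX tokenizer.
def _special_token_toggle_demo():
    tokenizer = GPTNeoXTokenizerFast.from_pretrained('EleutherAI/gpt-neox-20b')
    plain = tokenizer('Hello world').input_ids    # no BOS/EOS added by default
    tokenizer.add_bos_token = True                # setter triggers update_post_processor()
    tokenizer.add_eos_token = True
    wrapped = tokenizer('Hello world').input_ids  # <|endoftext|> now added at both ends
    return plain, wrapped                         # len(wrapped) == len(plain) + 2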
# File: transformers-main/src/transformers/models/gpt_neox_japanese/__init__.py from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _import_structure = {'configuration_gpt_neox_japanese': ['GPTNeoXJapaneseConfig'], 'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_gpt_neox_japanese'] = ['GPTNeoXJapaneseForCausalLM', 'GPTNeoXJapaneseLayer', 'GPTNeoXJapaneseModel', 'GPTNeoXJapanesePreTrainedModel'] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py """""" from ...configuration_utils import PretrainedConfig from ...modeling_rope_utils import rope_config_validation from ...utils import logging logger = logging.get_logger(__name__) class GPTNeoXJapaneseConfig(PretrainedConfig): model_type = 'gpt_neox_japanese' def __init__(self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act='gelu', rotary_pct=1.0, rotary_emb_base=10000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, bos_token_id=31996, eos_token_id=31999, rope_scaling=None, attention_dropout=0.1, hidden_dropout=0.0, **kwargs): super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_multiple_size = intermediate_multiple_size self.hidden_act = hidden_act self.rotary_pct = rotary_pct self.partial_rotary_factor = rotary_pct self.rotary_emb_base = rotary_emb_base self.rope_theta = rotary_emb_base self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.rope_scaling = rope_scaling self.attention_dropout = attention_dropout self.hidden_dropout = hidden_dropout if self.rope_scaling is not None and 'type' in self.rope_scaling: self.rope_scaling['rope_type'] = self.rope_scaling['type'] rope_config_validation(self) # File: transformers-main/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py """""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_gpt_neox_japanese import GPTNeoXJapaneseConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'abeja/gpt-neox-japanese-2.7b' _CONFIG_FOR_DOC = 'GPTNeoXJapaneseConfig' def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: 
torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask class GPTNeoXJapanesePreTrainedModel(PreTrainedModel): config_class = GPTNeoXJapaneseConfig base_model_prefix = 'gpt_neox_japanese' _no_split_modules = ['GPTNeoXJapaneseLayer'] _skip_keys_device_placement = 'past_key_values' _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class GPTNeoXJapaneseAttention(nn.Module): def __init__(self, config, use_bias=False, layer_idx=None): super().__init__() self.num_attention_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_attention_heads if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. 
Please make sure to provide a `layer_idx` when creating this class.') self.layer_idx = layer_idx self.rotary_ndims = int(self.head_size * config.rotary_pct) self.rope_theta = config.rotary_emb_base self.rotary_emb = GPTNeoXJapaneseRotaryEmbedding(config=config) self.attention_dropout = nn.Dropout(config.attention_dropout) self.norm_factor = math.sqrt(self.head_size) self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=False) self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False) self.use_bias = use_bias self.dense_bias = nn.Parameter(torch.zeros(config.hidden_size)) if use_bias else None def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, position_ids: torch.LongTensor, head_mask: Optional[torch.FloatTensor]=None, layer_past: Optional[Cache]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None): qkv = self.query_key_value(hidden_states) new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size) qkv = qkv.view(*new_qkv_shape) query = qkv[..., :self.head_size].permute(0, 2, 1, 3) key = qkv[..., self.head_size:2 * self.head_size].permute(0, 2, 1, 3) value = qkv[..., 2 * self.head_size:].permute(0, 2, 1, 3) query_rot = query[..., :self.rotary_ndims] query_pass = query[..., self.rotary_ndims:] key_rot = key[..., :self.rotary_ndims] key_pass = key[..., self.rotary_ndims:] if position_embeddings is None: logger.warning_once('The attention layers in this model are transitioning from computing the RoPE embeddings internally through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed `position_embeddings` (Tuple of tensors, containing cos and sin). 
In v4.46 `position_ids` will be removed and `position_embeddings` will be mandatory.') (cos, sin) = self.rotary_emb(value, position_ids) else: (cos, sin) = position_embeddings (query, key) = apply_rotary_pos_emb(query_rot, key_rot, cos, sin) query = torch.cat((query, query_pass), dim=-1) key = torch.cat((key, key_pass), dim=-1) if layer_past is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'partial_rotation_size': self.rotary_ndims, 'cache_position': cache_position} (key, value) = layer_past.update(key, value, self.layer_idx, cache_kwargs) (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size) attn_output = self.dense(attn_output) outputs = (attn_output, layer_past) if output_attentions: outputs += (attn_weights,) return (outputs, self.dense_bias) @classmethod def _split_heads(cls, tensor, num_attention_heads, attn_head_size): new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) tensor = tensor.view(new_shape) tensor = tensor.permute(0, 2, 1, 3) return tensor @classmethod def _merge_heads(cls, tensor, num_attention_heads, attn_head_size): tensor = tensor.permute(0, 2, 1, 3).contiguous() tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size) return tensor def _attn(self, query, key, value, attention_mask=None, head_mask=None): (batch_size, num_attention_heads, query_length, attn_head_size) = query.size() key_length = key.size(-2) query = query.view(batch_size * num_attention_heads, query_length, attn_head_size) key = key.view(batch_size * num_attention_heads, key_length, attn_head_size) attn_scores = torch.zeros(batch_size * num_attention_heads, query_length, key_length, dtype=query.dtype, device=key.device) attention_scores = torch.baddbmm(attn_scores, query, key.transpose(1, 2), beta=1.0, alpha=1.0 / self.norm_factor) attention_scores = attention_scores.view(batch_size, num_attention_heads, query_length, -1) if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key.shape[-2]] attention_scores = attention_scores + causal_mask attn_weights = nn.functional.softmax(attention_scores, dim=-1) attn_weights = self.attention_dropout(attn_weights) attn_weights = attn_weights.to(value.dtype) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return (attn_output, attn_weights) class GPTNeoXJapaneseRotaryEmbedding(nn.Module): def __init__(self, dim=None, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0, rope_type='default', config: Optional[GPTNeoXJapaneseConfig]=None): super().__init__() self.rope_kwargs = {} if config is None: logger.warning_once('`GPTNeoXJapaneseRotaryEmbedding` can now be fully parameterized by passing the model config through the `config` argument. 
All other arguments will be removed in v4.46') self.rope_kwargs = {'rope_type': rope_type, 'factor': scaling_factor, 'dim': dim, 'base': base, 'max_position_embeddings': max_position_embeddings} self.rope_type = rope_type self.max_seq_len_cached = max_position_embeddings self.original_max_seq_len = max_position_embeddings else: if config.rope_scaling is not None: self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type')) else: self.rope_type = 'default' self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] (inv_freq, self.attention_scaling) = self.rope_init_fn(self.config, device, **self.rope_kwargs) self.register_buffer('inv_freq', inv_freq, persistent=False) self.original_inv_freq = self.inv_freq def _dynamic_frequency_update(self, position_ids, device): seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: (inv_freq, self.attention_scaling) = self.rope_init_fn(self.config, device, seq_len=seq_len, **self.rope_kwargs) self.register_buffer('inv_freq', inv_freq, persistent=False) self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: self.register_buffer('inv_freq', self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, position_ids): if 'dynamic' in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() cos = cos * self.attention_scaling sin = sin * self.attention_scaling return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)) def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = q * cos + rotate_half(q) * sin k_embed = k * cos + rotate_half(k) * sin return (q_embed, k_embed) def bias_dropout_add(x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float, training: bool) -> Tensor: if bias is not None: x = x + bias out = torch.nn.functional.dropout(x, p=prob, training=training) if residual is not None: out = residual + out return out class GPTNeoXJapaneseMLP(nn.Module): def __init__(self, config): super().__init__() intermediate_size = int(config.hidden_size * config.intermediate_multiple_size) self.dense_h_to_4h = nn.Linear(config.hidden_size, intermediate_size, bias=False) self.dense_4h_to_h = nn.Linear(intermediate_size, config.hidden_size, bias=False) self.act = ACT2FN[config.hidden_act] def forward(self, hidden_states): intermediate = self.dense_h_to_4h(hidden_states) intermediate = self.act(intermediate) output = self.dense_4h_to_h(intermediate) return output class GPTNeoXJapaneseLayer(nn.Module): def __init__(self, config, layer_number): super().__init__() self.layer_number = layer_number self.input_layernorm = 
nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.attention = GPTNeoXJapaneseAttention(config=config, use_bias=layer_number == config.num_hidden_layers - 1, layer_idx=layer_number) self.mlp = GPTNeoXJapaneseMLP(config) self.hidden_dropout = config.hidden_dropout def forward(self, hidden_states: Optional[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, layer_past: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None): residual = hidden_states ln_out = self.input_layernorm(hidden_states) (attention_layer_outputs, attn_bias) = self.attention(ln_out, attention_mask=attention_mask, layer_past=layer_past, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, position_ids=position_ids, cache_position=cache_position, position_embeddings=position_embeddings) attn_output = attention_layer_outputs[0] outputs = attention_layer_outputs[1:] attn_output = bias_dropout_add(attn_output, bias=attn_bias.expand_as(residual) if attn_bias is not None else attn_bias, residual=residual, prob=self.hidden_dropout, training=self.training) mlp_output = self.mlp(self.post_attention_layernorm(attn_output)) attn_output = bias_dropout_add(mlp_output, bias=None, residual=attn_output, prob=self.hidden_dropout, training=self.training) if use_cache: outputs = (attn_output,) + outputs else: outputs = (attn_output,) + outputs[1:] return outputs GPT_NEOX_JAPANESE_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`~GPTNeoXJapaneseConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GPT_NEOX_JAPANESE_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`].\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance;\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. 
It is used to update the cache in the correct position and to infer\n the complete sequence length.\n" @add_start_docstrings('The bare GPTNeoXJapanese Model transformer outputting raw hidden-states without any specific head on top.', GPT_NEOX_JAPANESE_START_DOCSTRING) class GPTNeoXJapaneseModel(GPTNeoXJapanesePreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([GPTNeoXJapaneseLayer(config=config, layer_number=i) for i in range(config.num_hidden_layers)]) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.rotary_emb = GPTNeoXJapaneseRotaryEmbedding(config=config) self.post_init() def get_input_embeddings(self): return self.embed_in def set_input_embeddings(self, value): self.embed_in = value @add_start_docstrings_to_model_forward(GPT_NEOX_JAPANESE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if inputs_embeds is None: inputs_embeds = self.embed_in(input_ids) use_legacy_cache = False if use_cache and (not isinstance(past_key_values, Cache)): use_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) if not self.training: logger.warning_once('We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.45. 
Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/internal/generation_utils#transformers.Cache)') seq_length = inputs_embeds.shape[1] if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) next_decoder_cache = None all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, head_mask=head_mask[i], layer_past=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, position_embeddings=position_embeddings) hidden_states = outputs[0] if use_cache is True: next_decoder_cache = outputs[1] if output_attentions: all_attentions = all_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.final_layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_attentions] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_attentions) def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions): causal_mask = 
AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @add_start_docstrings('GPTNeoXJapanese Model with a `language modeling` head on top for Classifier Model fine-tuning.', GPT_NEOX_JAPANESE_START_DOCSTRING) class GPTNeoXJapaneseForCausalLM(GPTNeoXJapanesePreTrainedModel): _tied_weights_keys = ['embed_out.weight'] def __init__(self, config): super().__init__(config) self.config = config self.gpt_neox_japanese = GPTNeoXJapaneseModel(config) self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_output_embeddings(self): return self.embed_out def set_output_embeddings(self, new_embeddings): self.embed_out = new_embeddings @add_start_docstrings_to_model_forward(GPT_NEOX_JAPANESE_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.FloatTensor]]]]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, CausalLMOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.gpt_neox_japanese(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] lm_logits = self.embed_out(hidden_states) lm_loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return (lm_loss,) + output if lm_loss is not None else output return CausalLMOutputWithPast(loss=lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs): if past_key_values is not None: if inputs_embeds is not None: input_ids = input_ids[:, -cache_position.shape[0]:] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] position_ids = position_ids.clone(memory_format=torch.contiguous_format) if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None} else: model_inputs = {'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None} if isinstance(past_key_values, 
StaticCache) and attention_mask.ndim == 2: if model_inputs['inputs_embeds'] is not None: (batch_size, sequence_length, _) = model_inputs['inputs_embeds'].shape device = model_inputs['inputs_embeds'].device else: (batch_size, sequence_length) = model_inputs['input_ids'].shape device = model_inputs['input_ids'].device dtype = self.embed_out.weight.dtype min_dtype = torch.finfo(dtype).min attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_length(), dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=batch_size) model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask}) return model_inputs def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])) + layer_past[2:],) return reordered_past # File: transformers-main/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py """""" import collections import json import os import re from typing import Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'} def load_vocab_and_emoji(vocab_file, emoji_file): with open(emoji_file, 'r', encoding='utf-8') as f: emoji = json.loads(f.read()) vocab = collections.OrderedDict() raw_vocab = collections.OrderedDict() ids_to_tokens = collections.OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as f: token = f.readlines() token = [[t.rstrip('\n')] if t == ',' or ',' not in t else t.rstrip('\n').split(',') for t in token] for (idx, b) in enumerate(token): ids_to_tokens[idx] = b raw_vocab[','.join(b)] = idx for wd in b: vocab[wd] = idx return (vocab, raw_vocab, ids_to_tokens, emoji) class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, emoji_file, unk_token='<|endoftext|>', pad_token='<|endoftext|>', bos_token='<|startoftext|>', eos_token='<|endoftext|>', do_clean_text=False, **kwargs): if not os.path.isfile(vocab_file): raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") if not os.path.isfile(emoji_file): raise ValueError(f"Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`") self.do_clean_text = do_clean_text (self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji) = load_vocab_and_emoji(vocab_file, emoji_file) self.subword_tokenizer = SubWordJapaneseTokenizer(vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji) super().__init__(unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs) @property def vocab_size(self): return len(self.raw_vocab) def get_vocab(self): return dict(self.raw_vocab, **self.added_tokens_encoder) def _tokenize(self, text): return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text) def _convert_token_to_id(self, token): return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): return self.subword_tokenizer.convert_id_to_token(index) def convert_tokens_to_string(self, tokens): out_string = ''.join(tokens).strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: index = 0 if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) emoji_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file']) else: vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] emoji_file = (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] with open(vocab_file, 'w', encoding='utf-8') as writer: for (token_index, token) in self.ids_to_tokens.items(): if index != token_index: logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. 
Please check that the vocabulary is not corrupted!') index = token_index writer.write(','.join(token) + '\n') index += 1 with open(emoji_file, 'w', encoding='utf-8') as writer: json.dump(self.emoji, writer) return (vocab_file, emoji_file) class SubWordJapaneseTokenizer: def __init__(self, vocab, ids_to_tokens, emoji): self.vocab = vocab self.ids_to_tokens = ids_to_tokens self.emoji = emoji self.maxlen = np.max([len(w) for w in self.vocab.keys()]) self.content_repatter1 = re.compile("(https?|ftp)(:\\/\\/[-_\\.!~*\\'()a-zA-Z0-9;\\/?:\\@&=\\+$,%#]+)") self.content_repatter2 = re.compile('[A-Za-z0-9\\._+]*@[\\-_0-9A-Za-z]+(\\.[A-Za-z]+)*') self.content_repatter3 = re.compile('[\\(]{0,1}[0-9]{2,4}[\\)\\-\\(]{0,1}[0-9]{2,4}[\\)\\-]{0,1}[0-9]{3,4}') self.content_repatter4 = re.compile('([12]\\d{3}[/\\-年])*(0?[1-9]|1[0-2])[/\\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\\d{1,2}|:|\\d{1,2}時|\\d{1,2}分|\\(日\\)|\\(月\\)|\\(火\\)|\\(水\\)|\\(木\\)|\\(金\\)|\\(土\\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*') self.content_repatter5 = re.compile('(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\\u32ff)\\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\\d{1,2}|:|\\d{1,2}時|\\d{1,2}分|\\(日\\)|\\(月\\)|\\(火\\)|\\(水\\)|\\(木\\)|\\(金\\)|\\(土\\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*') self.content_repatter6 = re.compile('((0|[1-9]\\d*|[1-9]\\d{0,2}(,\\d{3})+)*億)*((0|[1-9]\\d*|[1-9]\\d{0,2}(,\\d{3})+)*万)*((0|[1-9]\\d*|[1-9]\\d{0,2}(,\\d{3})+)*千)*(0|[1-9]\\d*|[1-9]\\d{0,2}(,\\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\\(税込\\)|\\(税抜\\)|\\+tax)*') keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' self.content_trans1 = str.maketrans({k: '' for k in keisen + blocks}) def __len__(self): return len(self.ids_to_tokens) def clean_text(self, content): content = self.content_repatter1.sub('', content) content = self.content_repatter2.sub('', content) content = self.content_repatter3.sub('', content) content = self.content_repatter4.sub('', content) content = self.content_repatter5.sub('', content) content = self.content_repatter6.sub('', content) content = content.translate(self.content_trans1) while '' in content: content = content.replace('', '') return content def tokenize(self, text, clean=False): text = text.replace(' ', '') text = text.replace('\u3000', '') text = text.replace('\r\n', '
<BR>') text = text.replace('\n', '<BR>') text = text.replace('\r', '<BR>') text = text.replace('\t', '<TAB>') text = text.replace('—', 'ー') text = text.replace('−', 'ー') for (k, v) in self.emoji['emoji'].items(): if k in text: text = text.replace(k, v) if clean: text = self.clean_text(text) def check_simbol(x): e = x.encode() if len(x) == 1 and len(e) == 2: c = (int(e[0]) << 8) + int(e[1]) if c >= 49825 and c <= 49855 or (c >= 51072 and c <= 51075) or (c >= 51897 and c <= 52159) or (c >= 52352 and c <= 52642): return True return False def checku2e(x): e = x.encode() if len(x) == 1 and len(e) == 3: c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2]) if c >= 14844032 and c <= 14856319: return True return False pos = 0 result = [] while pos < len(text): end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3 candidates = [] for e in range(end, pos, -1): wd = text[pos:e] if wd in self.vocab: if wd[0] == '<' and len(wd) > 2: candidates = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e)) if len(candidates) > 0: (_, wd, e) = sorted(candidates, key=lambda x: x[0])[0] result.append(wd) pos = e else: end = pos + 1 wd = text[pos:end] if check_simbol(wd): result.append('<KIGOU>') elif checku2e(wd): result.append('<U2000U2BFF>') else: for i in wd.encode('utf-8'): result.append('<|byte%d|>' % i) pos = end return result def convert_id_to_token(self, index, breakline='\n'): words = [] byte_tokens = [] word = self.ids_to_tokens[index][0] if word[:6] == '<|byte' and word[-2:] == '|>': byte_tokens.append(int(word[6:-2])) else: if len(byte_tokens) > 0: words.append(bytearray(byte_tokens).decode('utf-8', errors='replace')) byte_tokens = [] if word[:7] == '<|emoji' and word[-2:] == '|>': words.append(self.emoji['emoji_inv'][word]) elif word == '<SP>': words.append(' ') elif word == '<BR>
': words.append(breakline) elif word == '': words.append('\t') elif word == '': words.append('▀') elif word == '': words.append('ǀ') elif word == '': words.append('‖') else: words.append(word) if len(byte_tokens) > 0: words.append(bytearray(byte_tokens).decode('utf-8', errors='replace')) text = ''.join(words) return text # File: transformers-main/src/transformers/models/gpt_sw3/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _import_structure = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_sw3 import GPTSw3Tokenizer else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/gpt_sw3/convert_megatron_to_pytorch.py """""" import argparse import os from os.path import isfile import torch from transformers import GPT2Config def recursive_print(name, val, spaces=0): if name is None: msg = None else: fmt = '.' * max(0, spaces - 2) + '# {:' + str(50 - spaces) + 's}' msg = fmt.format(name) if isinstance(val, dict): if msg is not None: print(msg) for k in val.keys(): recursive_print(k, val[k], spaces + 2) elif isinstance(val, torch.Tensor): print(msg, ':', val.size()) else: print(msg, ':', val) def fix_query_key_value_ordering(param, num_splits, num_heads, hidden_size): input_shape = param.size() saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:] param = param.view(*saved_shape) param = param.transpose(0, 1).contiguous() param = param.view(*input_shape) return param def convert_megatron_checkpoint(sd_megatron, config): n_positions = config.n_positions layers = config.n_layer vocab_size = config.vocab_size heads = config.n_head hidden_size_per_head = config.n_embd // config.n_head word_embeddings = sd_megatron['model.language_model.embedding.word_embeddings.weight'][:vocab_size, :] sd_hf = {'transformer.wte.weight': word_embeddings, 'transformer.wpe.weight': sd_megatron['model.language_model.embedding.position_embeddings.weight'], 'transformer.ln_f.weight': sd_megatron['model.language_model.encoder.final_layernorm.weight'], 'transformer.ln_f.bias': sd_megatron['model.language_model.encoder.final_layernorm.bias']} pf = 'model.language_model.encoder.layers.' 
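# The loop below builds the Hugging Face GPT-2-style state dict layer by layer. For each Megatron layer it
# (1) registers the lower-triangular `attn.bias` buffer and the `attn.masked_bias` constant that GPT2Attention
# expects, (2) regroups the fused query/key/value tensor with `fix_query_key_value_ordering` so that all query
# rows come first, then all keys, then all values (Megatron interleaves q, k and v per attention head), and
# (3) transposes the attention/MLP weight matrices because GPT-2 uses `Conv1D` modules whose weights are stored
# as (in_features, out_features). Illustrative shapes, assuming 32 heads and hidden size 4096: the fused QKV
# weight of shape (12288, 4096) is viewed as (32, 3, 128, 4096), transposed to (3, 32, 128, 4096) and flattened
# back. After the loop, `lm_head.weight` simply reuses the (vocab-truncated) word embedding matrix.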
for i in range(layers): causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.bool)) causal_mask = causal_mask.view(1, 1, n_positions, n_positions) sd_hf[f'transformer.h.{i}.attn.bias'] = causal_mask sd_hf[f'transformer.h.{i}.attn.masked_bias'] = torch.tensor(-10000.0, dtype=torch.bfloat16) sd_hf[f'transformer.h.{i}.ln_1.weight'] = sd_megatron[f'{pf}{i}.input_layernorm.weight'] sd_hf[f'transformer.h.{i}.ln_1.bias'] = sd_megatron[f'{pf}{i}.input_layernorm.bias'] val1 = sd_megatron[f'{pf}{i}.self_attention.query_key_value.weight'] val1 = fix_query_key_value_ordering(val1, 3, heads, hidden_size_per_head) sd_hf[f'transformer.h.{i}.attn.c_attn.weight'] = val1.transpose(0, 1).contiguous() val2 = sd_megatron[f'{pf}{i}.self_attention.query_key_value.bias'] val2 = fix_query_key_value_ordering(val2, 3, heads, hidden_size_per_head) sd_hf[f'transformer.h.{i}.attn.c_attn.bias'] = val2 sd_hf[f'transformer.h.{i}.attn.c_proj.weight'] = sd_megatron[f'{pf}{i}.self_attention.dense.weight'].transpose(0, 1) sd_hf[f'transformer.h.{i}.attn.c_proj.bias'] = sd_megatron[f'{pf}{i}.self_attention.dense.bias'] sd_hf[f'transformer.h.{i}.ln_2.weight'] = sd_megatron[f'{pf}{i}.post_attention_layernorm.weight'] sd_hf[f'transformer.h.{i}.ln_2.bias'] = sd_megatron[f'{pf}{i}.post_attention_layernorm.bias'] sd_hf[f'transformer.h.{i}.mlp.c_fc.weight'] = sd_megatron[f'{pf}{i}.mlp.dense_h_to_4h.weight'].transpose(0, 1) sd_hf[f'transformer.h.{i}.mlp.c_fc.bias'] = sd_megatron[f'{pf}{i}.mlp.dense_h_to_4h.bias'] sd_hf[f'transformer.h.{i}.mlp.c_proj.weight'] = sd_megatron[f'{pf}{i}.mlp.dense_4h_to_h.weight'].transpose(0, 1) sd_hf[f'transformer.h.{i}.mlp.c_proj.bias'] = sd_megatron[f'{pf}{i}.mlp.dense_4h_to_h.bias'] sd_hf['lm_head.weight'] = word_embeddings return sd_hf def copy_config(config_hf, config_megatron): config_hf.vocab_size = 64000 config_hf.n_positions = config_megatron['encoder_seq_length'] config_hf.n_embd = config_megatron['hidden_size'] config_hf.n_layer = config_megatron['num_layers'] config_hf.n_head = config_megatron['num_attention_heads'] config_hf.n_inner = config_megatron['ffn_hidden_size'] config_hf.activation_function = 'gelu' config_hf.resid_pdrop = 0.1 config_hf.embd_pdrop = 0.1 config_hf.attn_pdrop = 0.1 config_hf.layer_norm_epsilon = config_megatron['layernorm_epsilon'] config_hf.initializer_range = config_megatron['init_method_std'] config_hf.apply_query_key_layer_scaling = config_megatron['apply_query_key_layer_scaling'] config_hf.normalize_attention_scores = True config_hf.use_cache = True if config_megatron['hidden_size'] == 4096: config_hf.bos_token_id = 1 config_hf.eos_token_id = 1 config_hf.pad_token_id = 0 else: config_hf.bos_token_id = 2 config_hf.eos_token_id = 3 config_hf.pad_token_id = 0 return config_hf def main(args): print(args) checkpoint_path = args.checkpoint_path save_path = args.save_path if isfile(checkpoint_path): raise FileNotFoundError(f'ERROR! 
could not find file {checkpoint_path}') checkpoint = torch.load(checkpoint_path, map_location='cpu') config_megatron = checkpoint['hyper_parameters']['cfg'] config_hf = GPT2Config() config_hf = copy_config(config_hf=config_hf, config_megatron=config_megatron) config_hf.architectures = ['GPT2LMHeadModel'] sd_megatron = checkpoint['state_dict'] print('Converting') sd_hf = convert_megatron_checkpoint(sd_megatron, config_hf) if args.print_checkpoint_structure: recursive_print(None, sd_hf) config_hf.tokenizer_class = 'GPTSw3Tokenizer' print('Saving config') config_hf.save_pretrained(save_path) output_checkpoint_file = os.path.join(save_path, 'pytorch_model.bin') print(f'Saving checkpoint to "{output_checkpoint_file}"') torch.save(sd_hf, output_checkpoint_file) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True, help='e.g. megatron_gpt--val_loss=2.42-step=38000-consumed_samples=54720000') parser.add_argument('--save_path', type=str, required=True, help='e.g. /home/user/gpt-sw3/hf') parser.add_argument('--print-checkpoint-structure', action='store_true') _args = parser.parse_args() main(_args) # File: transformers-main/src/transformers/models/gpt_sw3/tokenization_gpt_sw3.py """""" import os import re import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'} class GPTSw3Tokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, do_lower_case=False, remove_space=False, keep_accents=False, pad_token=None, unk_token=None, eos_token=None, bos_token=None, sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs name_or_path = kwargs.get('name_or_path') if name_or_path is None: logger.warning('name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b, you are testing the model, this can safely be ignored') name_or_path = 'None' eos_token = '<|endoftext|>' if eos_token is None else eos_token unk_token = '' if unk_token is None else unk_token if 'gpt-sw3-7b' in name_or_path: pad_token = unk_token if pad_token is None else pad_token bos_token = eos_token if bos_token is None else bos_token else: pad_token = '' if pad_token is None else pad_token bos_token = '' if bos_token is None else bos_token self.do_lower_case = do_lower_case self.remove_space = remove_space self.keep_accents = keep_accents self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) self.whitespaces = {' ', '\u2009', '\u200a', '\u202f', '\u2005', '\u3000', '\u2002', ' ', '\u2008', '\u2003', '', '\x84'} self.non_printing_characters_re = re.compile(f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]") super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs) def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None return state def __setstate__(self, d): 
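# `spm.SentencePieceProcessor` instances are not picklable, so `__getstate__` replaces `sp_model` with None and
# `__setstate__` re-creates the processor and reloads it from `self.vocab_file` once the tokenizer is unpickled.
# This keeps the tokenizer safe to deep-copy or to hand to worker processes (e.g. `datasets.map(..., num_proc=N)`).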
self.__dict__ = d if not hasattr(self, 'sp_model_kwargs'): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) @property def vocab_size(self) -> int: return len(self.sp_model) def preprocess_text(self, text: str) -> str: text = self.non_printing_characters_re.sub('', text) text = ''.join([char if char not in self.whitespaces else ' ' for char in text]) text = unicodedata.normalize('NFC', text) return text def _tokenize(self, text: str, **kwargs) -> List[str]: text = self.preprocess_text(text) return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token: str) -> int: return self.sp_model.PieceToId(token) def _convert_id_to_token(self, index: int) -> str: return self.sp_model.IdToPiece(index) @staticmethod def clean_up_tokenization(out_string: str) -> str: return out_string def convert_tokens_to_string(self, tokens: List[str]) -> str: current_sub_tokens = [] out_string = '' prev_is_special = False for token in tokens: if token in self.all_special_tokens: if not prev_is_special: out_string += ' ' out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string def get_vocab(self) -> Dict[str, int]: vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool]=False) -> Union[List[int], List[List[int]], 'torch.Tensor']: if isinstance(text, str): text = self.preprocess_text(text) token_ids = self.sp_model.encode(text) else: text = [self.preprocess_text(t) for t in text] token_ids = self.sp_model.encode(text) if return_tensors is True or return_tensors == 'pt': token_ids = torch.tensor(token_ids) return token_ids def decode_fast(self, token_ids: Union[int, List[int]]) -> str: return self.sp_model.decode(token_ids) # File: transformers-main/src/transformers/models/gptj/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available _import_structure = {'configuration_gptj': ['GPTJConfig', 'GPTJOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_gptj'] = ['GPTJForCausalLM', 'GPTJForQuestionAnswering', 'GPTJForSequenceClassification', 'GPTJModel', 'GPTJPreTrainedModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_gptj'] = ['TFGPTJForCausalLM', 'TFGPTJForQuestionAnswering', 
'TFGPTJForSequenceClassification', 'TFGPTJModel', 'TFGPTJPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_flax_gptj'] = ['FlaxGPTJForCausalLM', 'FlaxGPTJModel', 'FlaxGPTJPreTrainedModel'] if TYPE_CHECKING: from .configuration_gptj import GPTJConfig, GPTJOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gptj import GPTJForCausalLM, GPTJForQuestionAnswering, GPTJForSequenceClassification, GPTJModel, GPTJPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_gptj import TFGPTJForCausalLM, TFGPTJForQuestionAnswering, TFGPTJForSequenceClassification, TFGPTJModel, TFGPTJPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel, FlaxGPTJPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/gptj/configuration_gptj.py """""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging logger = logging.get_logger(__name__) class GPTJConfig(PretrainedConfig): model_type = 'gptj' attribute_map = {'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'} def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function='gelu_new', resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-05, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs): self.vocab_size = vocab_size self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.n_inner = n_inner self.rotary_dim = rotary_dim self.activation_function = activation_function self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.use_cache = use_cache self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) class GPTJOnnxConfig(OnnxConfigWithPast): def __init__(self, config: PretrainedConfig, task: str='default', patching_specs: List[PatchingSpec]=None, use_past: bool=False): super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past) if not getattr(self._config, 'pad_token_id', None): self._config.pad_token_id = 0 @property def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}}) if self.use_past: self.fill_with_past_key_values_(common_inputs, direction='inputs') common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'} else: common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'} return common_inputs 
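# With `use_past=True`, `fill_with_past_key_values_` adds one key/value entry of dynamic axes per layer, so the
# exported inputs look roughly like the following (illustrative sketch for a 2-layer config; the exact input
# names come from `OnnxConfigWithPast`):
#   {'input_ids': {0: 'batch', 1: 'sequence'},
#    'past_key_values.0.key': {0: 'batch', 2: 'past_sequence + sequence'},
#    'past_key_values.0.value': {0: 'batch', 2: 'past_sequence + sequence'},
#    'past_key_values.1.key': {0: 'batch', 2: 'past_sequence + sequence'},
#    'past_key_values.1.value': {0: 'batch', 2: 'past_sequence + sequence'},
#    'attention_mask': {0: 'batch', 1: 'past_sequence + sequence'}}
# Every named axis is exported as a dynamic ONNX axis, and the attention mask must cover cached plus new tokens.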
@property def num_layers(self) -> int: return self._config.n_layer @property def num_attention_heads(self) -> int: return self._config.n_head def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]: common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework) ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']}) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch (batch, seqlen) = common_inputs['input_ids'].shape past_key_values_length = seqlen + 2 past_shape = (batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads) ordered_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)] ordered_inputs['attention_mask'] = common_inputs['attention_mask'] if self.use_past: mask_dtype = ordered_inputs['attention_mask'].dtype ordered_inputs['attention_mask'] = torch.cat([ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1) return ordered_inputs @property def default_onnx_opset(self) -> int: return 13 # File: transformers-main/src/transformers/models/gptj/modeling_flax_gptj.py from functools import partial from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_gptj import GPTJConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'gptj' _CONFIG_FOR_DOC = 'GPTJConfig' GPTJ_START_DOCSTRING = '\n\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a Flax Linen\n [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a\n regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.\n\n Finally, this model supports inherent JAX features such as:\n\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and\n [`~FlaxPreTrainedModel.to_bf16`].\n' GPTJ_INPUTS_DOCSTRING = '\n Args:\n input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):\n Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast\n auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' def create_sinusoidal_positions(num_pos, dim): inv_freq = 1.0 / 10000 ** (np.arange(0, dim, 2) / dim) sinusoid_inp = np.einsum('i , j -> i j', np.arange(num_pos), inv_freq).astype('float32') (sin, cos) = (np.sin(sinusoid_inp), np.cos(sinusoid_inp)) sentinel = dim // 2 + dim % 2 out = np.zeros((num_pos, dim)) out[:, 0:sentinel] = sin out[:, sentinel:] = cos return jnp.array(out) def rotate_every_two(tensor): rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1) rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,)) return rotate_half_tensor def apply_rotary_pos_emb(tensor, sincos): (sin_pos, cos_pos) = sincos sin_pos = sin_pos[:, :, None, :].repeat(2, 3) cos_pos = cos_pos[:, :, None, :].repeat(2, 3) return tensor * cos_pos + rotate_every_two(tensor) * sin_pos class FlaxGPTJAttention(nn.Module): config: GPTJConfig dtype: jnp.dtype = jnp.float32 causal: bool = True is_cross_attention: bool = False def setup(self): config = self.config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.rotary_dim = config.rotary_dim dense = partial(nn.Dense, self.embed_dim, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range)) (self.q_proj, self.k_proj, self.v_proj) = (dense(), dense(), dense()) self.out_proj = dense() self.resid_dropout = nn.Dropout(rate=config.resid_pdrop) self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype='bool'), dtype='bool') pos_embd_dim = self.rotary_dim or self.embed_dim self.embed_positions = create_sinusoidal_positions(config.max_position_embeddings, pos_embd_dim) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') cached_key = self.variable('cache', 'cached_key', jnp.zeros, key.shape, key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape, value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, dtype=jnp.int32)) if is_initialized: (*batch_dims, max_length, num_heads, depth_per_head) = cached_key.value.shape cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors pad_mask = jnp.broadcast_to(jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) def __call__(self, hidden_states, attention_mask, position_ids, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False): query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = 
self._split_heads(query) key = self._split_heads(key) value = self._split_heads(value) sincos = jnp.take(self.embed_positions, position_ids, axis=0) sincos = jnp.split(sincos, 2, axis=-1) if self.rotary_dim is not None: k_rot = key[:, :, :, :self.rotary_dim] k_pass = key[:, :, :, self.rotary_dim:] q_rot = query[:, :, :, :self.rotary_dim] q_pass = query[:, :, :, self.rotary_dim:] k_rot = apply_rotary_pos_emb(k_rot, sincos) q_rot = apply_rotary_pos_emb(q_rot, sincos) key = jnp.concatenate([k_rot, k_pass], axis=-1) query = jnp.concatenate([q_rot, q_pass], axis=-1) else: key = apply_rotary_pos_emb(key, sincos) query = apply_rotary_pos_emb(query, sincos) (query_length, key_length) = (query.shape[1], key.shape[1]) if self.has_variable('cache', 'cached_key'): mask_shift = self.variables['cache']['cache_index'] max_decoder_length = self.variables['cache']['cached_key'].shape[1] causal_mask = lax.dynamic_slice(self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] batch_size = hidden_states.shape[0] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) dropout_rng = None if not deterministic and self.config.attn_pdrop > 0.0: dropout_rng = self.make_rng('dropout') if self.has_variable('cache', 'cached_key') or init_cache: (key, value, attention_mask) = self._concatenate_to_cache(key, value, query, attention_mask) attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) attn_weights = dot_product_attention_weights(query, key, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attn_pdrop, deterministic=deterministic, dtype=self.dtype, precision=None) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output, deterministic=deterministic) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxGPTJMLP(nn.Module): config: GPTJConfig intermediate_size: int dtype: jnp.dtype = jnp.float32 def setup(self): embed_dim = self.config.hidden_size kernel_init = jax.nn.initializers.normal(self.config.initializer_range) self.fc_in = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init) self.fc_out = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init) self.act = ACT2FN[self.config.activation_function] self.dropout = nn.Dropout(rate=self.config.resid_pdrop) def __call__(self, hidden_states, deterministic: bool=True): hidden_states = self.fc_in(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.fc_out(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxGPTJBlock(nn.Module): config: GPTJConfig dtype: jnp.dtype = jnp.float32 def setup(self): hidden_size = self.config.hidden_size inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.attn = FlaxGPTJAttention(self.config, dtype=self.dtype) self.mlp = FlaxGPTJMLP(self.config, inner_dim, dtype=self.dtype) def 
__call__(self, hidden_states, attention_mask=None, position_ids=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False): residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn(hidden_states, attention_mask=attention_mask, position_ids=position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions) attn_output = attn_outputs[0] feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic) hidden_states = attn_output + feed_forward_hidden_states + residual return (hidden_states,) + attn_outputs[1:] class FlaxGPTJPreTrainedModel(FlaxPreTrainedModel): config_class = GPTJConfig base_model_prefix = 'transformer' module_class: nn.Module = None def __init__(self, config: GPTJConfig, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_ids = jnp.zeros(input_shape, dtype='i4') attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, encoder_hidden_states, encoder_attention_mask, return_dict=False) else: module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False) random_params = module_init_outputs['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): input_ids = jnp.ones((batch_size, max_length)) attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True) return init_variables['cache'] @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING) def __call__(self, input_ids, attention_mask=None, position_ids=None, params: dict=None, past_key_values: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict (batch_size, sequence_length) = input_ids.shape if position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `position_ids` when passing `past_key_values`.') position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], 
(batch_size, sequence_length)) if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False outputs = self.module.apply(inputs, jnp.array(input_ids, dtype='i4'), jnp.array(attention_mask, dtype='i4'), jnp.array(position_ids, dtype='i4'), not train, False, output_attentions, output_hidden_states, return_dict, rngs=rngs, mutable=mutable) if past_key_values is not None and return_dict: (outputs, past_key_values) = outputs outputs['past_key_values'] = unfreeze(past_key_values['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past_key_values) = outputs outputs = outputs[:1] + (unfreeze(past_key_values['cache']),) + outputs[1:] return outputs class FlaxGPTJBlockCollection(nn.Module): config: GPTJConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.blocks = [FlaxGPTJBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)] def __call__(self, hidden_states, attention_mask=None, position_ids=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for block in self.blocks: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = block(hidden_states, attention_mask, position_ids=position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) outputs = (hidden_states, all_hidden_states, all_attentions) return outputs class FlaxGPTJModule(nn.Module): config: GPTJConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.embed_dim = self.config.hidden_size self.wte = nn.Embed(self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) self.dropout = nn.Dropout(rate=self.config.embd_pdrop) self.h = FlaxGPTJBlockCollection(self.config, dtype=self.dtype) self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) def __call__(self, input_ids, attention_mask, position_ids, deterministic=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): input_embeds = self.wte(input_ids.astype('i4')) hidden_states = self.dropout(input_embeds, deterministic=deterministic) outputs = self.h(hidden_states, attention_mask, position_ids=position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = outputs[1] + (hidden_states,) outputs = (hidden_states, all_hidden_states) + outputs[2:] else: outputs = (hidden_states,) + outputs[1:] if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=outputs[1], attentions=outputs[-1]) @add_start_docstrings('The bare GPTJ Model transformer outputting raw hidden-states without any specific head on top.', GPTJ_START_DOCSTRING) class FlaxGPTJModel(FlaxGPTJPreTrainedModel): 
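# FlaxGPTJModel itself only binds `module_class`; parameter init, checkpoint loading and the __call__ plumbing
# all come from FlaxGPTJPreTrainedModel. Minimal usage sketch (assuming a GPT-J checkpoint that ships Flax
# weights, such as "EleutherAI/gpt-j-6B", is reachable):
#     from transformers import AutoTokenizer, FlaxGPTJModel
#     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#     model = FlaxGPTJModel.from_pretrained("EleutherAI/gpt-j-6B")
#     inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
#     last_hidden_state = model(**inputs).last_hidden_state  # shape: (batch, seq_len, n_embd)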
module_class = FlaxGPTJModule append_call_sample_docstring(FlaxGPTJModel, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC) class FlaxGPTJForCausalLMModule(nn.Module): config: GPTJConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.transformer = FlaxGPTJModule(self.config, dtype=self.dtype) self.lm_head = nn.Dense(self.config.vocab_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range)) def __call__(self, input_ids, attention_mask, position_ids, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): outputs = self.transformer(input_ids, attention_mask, position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_kernel = self.transformer.variables['params']['wte']['embedding'].T lm_logits = self.lm_head.apply({'params': {'kernel': shared_kernel}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n The GPTJ Model transformer with a language modeling head on top.\n ', GPTJ_START_DOCSTRING) class FlaxGPTJForCausalLM(FlaxGPTJPreTrainedModel): module_class = FlaxGPTJForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array]=None): (batch_size, seq_length) = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'attention_mask': extended_attention_mask, 'position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['position_ids'] = model_kwargs['position_ids'][:, -1:] + 1 return model_kwargs append_call_sample_docstring(FlaxGPTJForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC) # File: transformers-main/src/transformers/models/gptj/modeling_gptj.py """""" import warnings from typing import Optional, Tuple, Union import torch import torch.fx import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, is_torch_fx_proxy, logging from ...utils.model_parallel_utils import assert_device_map, get_device_map from .configuration_gptj import GPTJConfig if is_flash_attn_2_available(): from 
...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'hf-internal-testing/tiny-random-gptj' _REAL_CHECKPOINT_FOR_DOC = 'EleutherAI/gpt-j-6B' _CONFIG_FOR_DOC = 'GPTJConfig' def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor: inv_freq = 1.0 / 10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim) sinusoid_inp = torch.einsum('i , j -> i j', torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float() return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1) @torch.fx.wrap def get_embed_positions(embed_positions, position_ids): return embed_positions.to(position_ids.device).repeat(position_ids.shape[0], 1, 1) def rotate_every_two(x: torch.Tensor) -> torch.Tensor: x1 = x[:, :, :, ::2] x2 = x[:, :, :, 1::2] x = torch.stack((-x2, x1), dim=-1) return x.flatten(-2) def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor: sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3) cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3) return tensor * cos + rotate_every_two(tensor) * sin class GPTJAttention(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() self.config = config max_positions = config.max_position_embeddings self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) self.is_causal = True self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. 
Please make sure to provide a `layer_idx` when creating this class.') self.embed_dim = config.hidden_size self.num_attention_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_attention_heads if self.head_dim * self.num_attention_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and `num_attention_heads`: {self.num_attention_heads}).') self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype()) self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.rotary_dim = config.rotary_dim pos_embd_dim = self.rotary_dim or self.embed_dim self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim) def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary): new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) tensor = tensor.view(new_shape) if rotary: return tensor if len(tensor.shape) == 5: return tensor.permute(0, 1, 3, 2, 4) elif len(tensor.shape) == 4: return tensor.permute(0, 2, 1, 3) else: raise ValueError(f'Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}') def _merge_heads(self, tensor, num_attention_heads, attn_head_size): if len(tensor.shape) == 5: tensor = tensor.permute(0, 1, 3, 2, 4).contiguous() elif len(tensor.shape) == 4: tensor = tensor.permute(0, 2, 1, 3).contiguous() else: raise ValueError(f'Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}') new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,) return tensor.view(new_shape) def _attn(self, query, key, value, attention_mask=None, head_mask=None): query = query.to(torch.float32) key = key.to(torch.float32) attn_weights = torch.matmul(query, key.transpose(-1, -2)) attn_weights = attn_weights / self.scale_attn if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.to(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return (attn_output, attn_weights) def _get_embed_positions(self, position_ids): embed_positions = self.embed_positions if embed_positions.device != position_ids.device: embed_positions = embed_positions.to(position_ids.device) self.embed_positions = embed_positions return embed_positions.repeat(position_ids.shape[0], 1, 1) def forward(self, hidden_states: torch.FloatTensor, layer_past: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.Tensor, Tuple[torch.Tensor]], Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]]]: query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query, self.num_attention_heads, self.head_dim, True) key = self._split_heads(key, self.num_attention_heads, self.head_dim, True) value = 
self._split_heads(value, self.num_attention_heads, self.head_dim, False) if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing(): embed_positions = get_embed_positions(self.embed_positions, position_ids) else: embed_positions = self._get_embed_positions(position_ids) repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1]) sincos = torch.gather(embed_positions, 1, repeated_position_ids) (sin, cos) = torch.split(sincos, sincos.shape[-1] // 2, dim=-1) if self.rotary_dim is not None: k_rot = key[:, :, :, :self.rotary_dim] k_pass = key[:, :, :, self.rotary_dim:] q_rot = query[:, :, :, :self.rotary_dim] q_pass = query[:, :, :, self.rotary_dim:] k_rot = apply_rotary_pos_emb(k_rot, sin, cos) q_rot = apply_rotary_pos_emb(q_rot, sin, cos) key = torch.cat([k_rot, k_pass], dim=-1) query = torch.cat([q_rot, q_pass], dim=-1) else: key = apply_rotary_pos_emb(key, sin, cos) query = apply_rotary_pos_emb(query, sin, cos) key = key.permute(0, 2, 1, 3) query = query.permute(0, 2, 1, 3) if layer_past is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'partial_rotation_size': self.rotary_dim, 'cache_position': cache_position} (key, value) = layer_past.update(key, value, self.layer_idx, cache_kwargs) (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, layer_past) if output_attentions: outputs += (attn_weights,) return outputs class GPTJFlashAttention2(GPTJAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: torch.FloatTensor, layer_past: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.Tensor, Tuple[torch.Tensor]], Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]]]: query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query, self.num_attention_heads, self.head_dim, True) key = self._split_heads(key, self.num_attention_heads, self.head_dim, True) value = self._split_heads(value, self.num_attention_heads, self.head_dim, False) if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing(): embed_positions = get_embed_positions(self.embed_positions, position_ids) else: embed_positions = self._get_embed_positions(position_ids) repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1]) sincos = torch.gather(embed_positions, 1, repeated_position_ids) (sin, cos) = torch.split(sincos, sincos.shape[-1] // 2, dim=-1) if self.rotary_dim is not None: k_rot = key[:, :, :, :self.rotary_dim] k_pass = key[:, :, :, self.rotary_dim:] q_rot = query[:, :, :, :self.rotary_dim] q_pass = query[:, :, :, self.rotary_dim:] k_rot = apply_rotary_pos_emb(k_rot, sin, cos) q_rot = apply_rotary_pos_emb(q_rot, sin, cos) key = torch.cat([k_rot, k_pass], dim=-1) query = torch.cat([q_rot, q_pass], dim=-1) else: key = apply_rotary_pos_emb(key, sin, cos) query = apply_rotary_pos_emb(query, sin, cos) key = key.permute(0, 2, 1, 3) query = query.permute(0, 2, 
1, 3) if layer_past is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'partial_rotation_size': self.rotary_dim, 'cache_position': cache_position} (key, value) = layer_past.update(key, value, self.layer_idx, cache_kwargs) key = key.permute(0, 2, 1, 3).contiguous() query = query.permute(0, 2, 1, 3).contiguous() value = value.permute(0, 2, 1, 3).contiguous() input_dtype = query.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query = query.to(target_dtype) key = key.to(target_dtype) value = value.to(target_dtype) attention_dropout = self.config.attn_pdrop if self.training else 0.0 query_length = query.shape[1] attn_weights = _flash_attention_forward(query, key, value, attention_mask, query_length, dropout=attention_dropout, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_output = attn_weights.reshape(attn_weights.shape[0], attn_weights.shape[1], attn_weights.shape[2] * attn_weights.shape[3]) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, layer_past) if output_attentions: outputs += (attn_weights,) return outputs GPTJ_ATTENTION_CLASSES = {'eager': GPTJAttention, 'flash_attention_2': GPTJFlashAttention2} class GPTJMLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.n_embd self.fc_in = nn.Linear(embed_dim, intermediate_size) self.fc_out = nn.Linear(intermediate_size, embed_dim) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor: hidden_states = self.fc_in(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.fc_out(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class GPTJBlock(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) self.attn = GPTJ_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) self.mlp = GPTJMLP(inner_dim, config) def forward(self, hidden_states: Optional[torch.FloatTensor], layer_past: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn(hidden_states=hidden_states, layer_past=layer_past, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) attn_output = attn_outputs[0] outputs = attn_outputs[1:] feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = attn_output + 
feed_forward_hidden_states + residual if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs class GPTJPreTrainedModel(PreTrainedModel): config_class = GPTJConfig base_model_prefix = 'transformer' is_parallelizable = True supports_gradient_checkpointing = True _no_split_modules = ['GPTJBlock'] _skip_keys_device_placement = 'past_key_values' _supports_flash_attn_2 = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = True _supports_param_buffer_assignment = False def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): if isinstance(module, (nn.Linear,)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) GPTJ_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GPTJ_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance, see our\n [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. It is used to update the cache in the correct position and to infer\n the complete sequence length.\n" PARALLELIZE_DOCSTRING = '\n This is an experimental feature and is a subject to change at a moment\'s notice. Uses a device map to distribute\n attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks\n across all devices.\n\n Args:\n device_map (`Dict[int, list]`, *optional*):\n A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always\n automatically mapped to the first device (for esoteric reasons). That means that the first device should\n have fewer attention modules mapped to it than other devices. 
For reference, the GPT-J models have the\n following number of attention modules:\n\n - gpt-j-6B: 28\n\n Example:\n\n ```python\n # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules:\n model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")\n device_map = {\n 0: [0, 1, 2, 3, 4, 5, 6],\n 1: [7, 8, 9, 10, 11, 12, 13],\n 2: [14, 15, 16, 17, 18, 19, 20],\n 3: [21, 22, 23, 24, 25, 26, 27],\n }\n model.parallelize(device_map)\n ```\n' DEPARALLELIZE_DOCSTRING = '\n Moves the model to CPU from a model parallel state.\n\n Example:\n\n ```python\n # On a 4 GPU machine with gpt-j-6B:\n model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")\n device_map = {\n 0: [0, 1, 2, 3, 4, 5, 6],\n 1: [7, 8, 9, 10, 11, 12, 13],\n 2: [14, 15, 16, 17, 18, 19, 20],\n 3: [21, 22, 23, 24, 25, 26, 27],\n }\n model.parallelize(device_map) # Splits the model across several devices\n model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()\n ```\n' @add_start_docstrings('The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.', GPTJ_START_DOCSTRING) class GPTJModel(GPTJPreTrainedModel): def __init__(self, config): super().__init__(config) self.embed_dim = config.n_embd self.vocab_size = config.vocab_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPTJBlock(config, layer_idx=i) for i in range(config.n_layer)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.model_parallel = False self.device_map = None self.gradient_checkpointing = False self.post_init() self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn("`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. 
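# [editor's note] Hedged usage sketch, not part of the original file: as the deprecation
# message above suggests, model parallelism is normally requested with `device_map` in
# `from_pretrained` rather than with `parallelize()`. The checkpoint name and the need for
# the `accelerate` package are assumptions of this sketch.
def _editor_sketch_device_map_loading():
    from transformers import GPTJForCausalLM

    # "balanced" asks accelerate to spread the blocks evenly across the visible GPUs.
    model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", device_map="balanced")
    return model.hf_device_map                                 # module-name -> device mapping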
You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1, ...}", FutureWarning) self.device_map = get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map assert_device_map(self.device_map, len(self.h)) self.model_parallel = True self.first_device = 'cpu' if 'cpu' in self.device_map.keys() else 'cuda:' + str(min(self.device_map.keys())) self.last_device = 'cuda:' + str(max(self.device_map.keys())) self.wte = self.wte.to(self.first_device) for (k, v) in self.device_map.items(): for block in v: cuda_device = 'cuda:' + str(k) self.h[block] = self.h[block].to(cuda_device) self.ln_f = self.ln_f.to(self.last_device) @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning) self.model_parallel = False self.device_map = None self.first_device = 'cpu' self.last_device = 'cpu' self.wte = self.wte.to('cpu') for index in range(len(self.h)): self.h[index] = self.h[index].to('cpu') self.ln_f = self.ln_f.to('cpu') torch.cuda.empty_cache() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False if inputs_embeds is None: inputs_embeds = self.wte(input_ids) use_legacy_cache = False if use_cache and (not isinstance(past_key_values, Cache)): use_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) if not self.training: logger.warning_once('We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.45. 
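# [editor's note] Hedged sketch, not part of the original file: the branch above converts a
# legacy tuple cache with DynamicCache.from_legacy_cache and warns about the deprecation.
# Passing a Cache object explicitly avoids that path. Checkpoint and prompt are illustrative.
def _editor_sketch_explicit_cache():
    import torch
    from transformers import AutoTokenizer, GPTJForCausalLM
    from transformers.cache_utils import DynamicCache

    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gptj")
    model = GPTJForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gptj")
    inputs = tokenizer("Hello", return_tensors="pt")
    with torch.no_grad():
        out = model(**inputs, past_key_values=DynamicCache(), use_cache=True)
    return type(out.past_key_values)                           # DynamicCache, not legacy tuples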
Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/internal/generation_utils#transformers.Cache)') seq_length = inputs_embeds.shape[1] if cache_position is None: past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) head_mask = self.get_head_mask(head_mask, self.config.n_layer) hidden_states = inputs_embeds if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, seq_length) token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = (-1, seq_length, hidden_states.size(-1)) next_decoder_cache = None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, block) in enumerate(self.h): if self.model_parallel: torch.cuda.set_device(hidden_states.device) if past_key_values is not None: past_key_values.key_cache = past_key_values.key_cache.to(hidden_states.device) past_key_values.value_cache = past_key_values.value_cache.to(hidden_states.device) if causal_mask is not None: causal_mask = causal_mask.to(hidden_states.device) if isinstance(head_mask, torch.Tensor): head_mask = head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, None, causal_mask, position_ids, head_mask[i], use_cache, output_attentions, cache_position) else: outputs = block(hidden_states=hidden_states, layer_past=past_key_values, attention_mask=causal_mask, position_ids=position_ids, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position) hidden_states = outputs[0] if use_cache is True: next_decoder_cache = outputs[1] if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) if self.model_parallel: for (k, v) in self.device_map.items(): if i == v[-1] and 'cuda:' + str(k) != self.last_device: hidden_states = hidden_states.to('cuda:' + str(k + 1)) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attentions) def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if 
self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions): causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @add_start_docstrings('\n The GPT-J Model transformer with a language modeling head on top.\n ', GPTJ_START_DOCSTRING) class GPTJForCausalLM(GPTJPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.transformer = GPTJModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size) self.model_parallel = False self.device_map = None self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn("`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0': 0, 'transformer.h.1': 1, ...}", FutureWarning) self.device_map = get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) if device_map is None else device_map assert_device_map(self.device_map, len(self.transformer.h)) self.transformer.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.transformer.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning) self.transformer.deparallelize() self.transformer = self.transformer.to('cpu') self.lm_head = self.lm_head.to('cpu') self.model_parallel = False torch.cuda.empty_cache() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, past_key_values=None, inputs_embeds=None, cache_position=None, use_cache=True, **kwargs): if past_key_values is not None: if inputs_embeds is not None: input_ids = input_ids[:, -cache_position.shape[0]:] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if token_type_ids is not None: token_type_ids = token_type_ids[:, -input_ids.shape[1]:] if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] position_ids = position_ids.clone(memory_format=torch.contiguous_format) if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None} else: model_inputs = {'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None} if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2: if model_inputs['inputs_embeds'] is not None: (batch_size, sequence_length, _) = model_inputs['inputs_embeds'].shape device = model_inputs['inputs_embeds'].device else: (batch_size, sequence_length) = model_inputs['input_ids'].shape device = model_inputs['input_ids'].device dtype = self.lm_head.weight.dtype min_dtype = torch.finfo(dtype).min attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_length(), dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=batch_size) model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}) return model_inputs @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: 
Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, CausalLMOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = transformer_outputs[0] if self.model_parallel: torch.cuda.set_device(self.transformer.first_device) hidden_states = hidden_states.to(self.lm_head.weight.device) lm_logits = self.lm_head(hidden_states).to(torch.float32) loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) loss = loss.to(hidden_states.dtype) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @staticmethod def _reorder_cache(past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past_key_values)) @add_start_docstrings('\n The GPT-J Model transformer with a sequence classification head on top (linear layer).\n\n [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT, GPT-2, GPT-Neo) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
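# [editor's note] Hedged usage sketch, not part of the original file: minimal greedy
# generation with the causal-LM head defined above. The tiny test checkpoint matches
# _CHECKPOINT_FOR_DOC; swap in "EleutherAI/gpt-j-6B" for the real model.
def _editor_sketch_causal_lm_generation():
    from transformers import AutoTokenizer, GPTJForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gptj")
    model = GPTJForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gptj")
    inputs = tokenizer("The quick brown fox", return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=8, do_sample=False)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)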
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', GPTJ_START_DOCSTRING) class GPTJForSequenceClassification(GPTJPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTJModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) self.model_parallel = False self.device_map = None self.post_init() @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint='ydshieh/tiny-random-gptj-for-sequence-classification', output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, SequenceClassifierOutputWithPast]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
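# [editor's note] Hedged illustration, not part of the original file: the pooling logic above
# selects the logits of the last non-padding token in each row. A standalone repetition of
# that index computation with made-up ids, to show what it picks:
def _editor_sketch_last_token_index():
    import torch

    pad_token_id = 0                                           # assumed pad id for the toy example
    input_ids = torch.tensor([[5, 6, 7, 0, 0],
                              [8, 9, 0, 0, 0]])
    # Index of the first pad token, minus one, gives the index of the last real token.
    sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
    sequence_lengths = sequence_lengths % input_ids.shape[-1]  # wraps rows that have no padding
    return sequence_lengths                                    # tensor([2, 1])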
Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(pooled_logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) @add_start_docstrings('\n The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like\n SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', GPTJ_START_DOCSTRING) class GPTJForQuestionAnswering(GPTJPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTJModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.model_parallel = False self.device_map = None self.post_init() @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, real_checkpoint=_REAL_CHECKPOINT_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: 
start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/gptj/modeling_tf_gptj.py """""" from __future__ import annotations from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutputWithPast from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSharedEmbeddings, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import logging from .configuration_gptj import GPTJConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'EleutherAI/gpt-j-6B' _CONFIG_FOR_DOC = 'GPTJConfig' def create_sinusoidal_positions(num_pos: int, dim: int) -> tf.Tensor: inv_freq = tf.cast(1.0 / 10000 ** (tf.range(0, dim, 2) / dim), tf.float32) sinusoid_inp = tf.cast(tf.einsum('i , j -> i j', tf.range(num_pos, dtype=tf.float32), inv_freq), tf.float32) (sin, cos) = (tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)) out = tf.concat((sin, cos), axis=1) return out def rotate_every_two(x: tf.Tensor) -> tf.Tensor: rotate_half_tensor = tf.stack((-x[:, :, :, 1::2], x[:, :, :, ::2]), axis=-1) new_shape = shape_list(rotate_half_tensor)[:-2] + [tf.math.reduce_prod(shape_list(rotate_half_tensor)[-2:])] rotate_half_tensor = tf.reshape(rotate_half_tensor, new_shape) return rotate_half_tensor def apply_rotary_pos_emb(tensor: tf.Tensor, sincos: tf.Tensor) -> tf.Tensor: (sin_pos, cos_pos) = sincos sin_pos = tf.repeat(sin_pos[:, :, None, :], 2, 3) cos_pos = tf.repeat(cos_pos[:, :, None, :], 2, 3) return tensor * cos_pos + rotate_every_two(tensor) * sin_pos class TFGPTJAttention(keras.layers.Layer): def __init__(self, config: GPTJConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.num_attention_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_attention_heads if self.head_dim * self.num_attention_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and `num_attention_heads`: {self.num_attention_heads}).') self.scale_attn = self.head_dim ** 0.5 self.rotary_dim = config.rotary_dim self.attn_dropout = keras.layers.Dropout(config.attn_pdrop) self.resid_dropout = keras.layers.Dropout(config.resid_pdrop) self.q_proj = keras.layers.Dense(self.embed_dim, use_bias=False, 
kernel_initializer=get_initializer(config.initializer_range), name='q_proj') self.k_proj = keras.layers.Dense(self.embed_dim, use_bias=False, kernel_initializer=get_initializer(config.initializer_range), name='k_proj') self.v_proj = keras.layers.Dense(self.embed_dim, use_bias=False, kernel_initializer=get_initializer(config.initializer_range), name='v_proj') self.out_proj = keras.layers.Dense(self.embed_dim, use_bias=False, kernel_initializer=get_initializer(config.initializer_range), name='out_proj') self.max_positions = config.max_position_embeddings self.lower_triangle_mask = tf.reshape(tf.cast(tf.experimental.numpy.tril(tf.ones((self.max_positions, self.max_positions))), tf.int8), (1, 1, self.max_positions, self.max_positions)) pos_embd_dim = self.rotary_dim or self.embed_dim self.embed_positions = create_sinusoidal_positions(self.max_positions, pos_embd_dim) def get_causal_mask(self, key_length, query_length) -> tf.Tensor: return tf.cast(self.lower_triangle_mask[:, :, key_length - query_length:key_length, :key_length], tf.bool) @staticmethod def get_masked_bias(dtype: tf.DType) -> tf.Tensor: return tf.cast(tf.constant(-1000000000.0), dtype) def _split_heads(self, hidden_states: tf.Tensor, rotary: bool) -> tf.Tensor: new_shape = shape_list(hidden_states)[:-1] + [self.num_attention_heads, self.head_dim] hidden_states = tf.reshape(hidden_states, new_shape) if rotary: return hidden_states if len(shape_list(hidden_states)) == 4: return tf.transpose(hidden_states, (0, 2, 1, 3)) if len(shape_list(hidden_states)) == 5: return tf.transpose(hidden_states, (0, 1, 3, 2, 4)) raise ValueError(f'Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}') def _merge_heads(self, hidden_states: tf.Tensor) -> tf.Tensor: if len(shape_list(hidden_states)) == 4: hidden_states = tf.transpose(hidden_states, (0, 2, 1, 3)) elif len(shape_list(hidden_states)) == 5: hidden_states = tf.transpose(hidden_states, (0, 1, 3, 2, 4)) else: raise ValueError(f'Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}') new_shape = shape_list(hidden_states)[:-2] + [self.num_attention_heads * self.head_dim] return tf.reshape(hidden_states, new_shape) def _attn(self, query: tf.Tensor, key: tf.Tensor, value: tf.Tensor, attention_mask: tf.Tensor | None=None, head_mask: tf.Tensor | None=None) -> Tuple[tf.Tensor, tf.Tensor]: (query_length, key_length) = (shape_list(query)[-2], shape_list(key)[-2]) causal_mask = self.get_causal_mask(key_length, query_length) query = tf.cast(query, tf.float32) key = tf.cast(key, tf.float32) attn_weights = tf.matmul(query, key, transpose_b=True) attn_weights = tf.where(causal_mask, attn_weights, self.get_masked_bias(attn_weights.dtype)) attn_weights = attn_weights / self.scale_attn if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = stable_softmax(attn_weights, axis=-1) attn_weights = tf.cast(attn_weights, value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = tf.matmul(attn_weights, value) return (attn_output, attn_weights) def call(self, hidden_states: tf.Tensor, layer_past: Optional[Tuple[tf.Tensor, tf.Tensor]]=None, attention_mask: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, use_cache: bool=False, output_attentions: bool=False): query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query, True) key 
= self._split_heads(key, True) value = self._split_heads(value, False) sincos = tf.cast(tf.gather(self.embed_positions, position_ids, axis=0), hidden_states.dtype) sincos = tf.split(sincos, 2, axis=-1) if self.rotary_dim is not None: k_rot = key[:, :, :, :self.rotary_dim] k_pass = key[:, :, :, self.rotary_dim:] q_rot = query[:, :, :, :self.rotary_dim] q_pass = query[:, :, :, self.rotary_dim:] k_rot = apply_rotary_pos_emb(k_rot, sincos) q_rot = apply_rotary_pos_emb(q_rot, sincos) key = tf.concat((k_rot, k_pass), axis=-1) query = tf.concat((q_rot, q_pass), axis=-1) else: key = apply_rotary_pos_emb(key, sincos) query = apply_rotary_pos_emb(query, sincos) key = tf.transpose(key, (0, 2, 1, 3)) query = tf.transpose(query, (0, 2, 1, 3)) if layer_past is not None: past_key = layer_past[0] past_value = layer_past[1] key = tf.concat((past_key, key), axis=-2) value = tf.concat((past_value, value), axis=-2) if use_cache is True: present = (key, value) else: present = None (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'q_proj', None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, 'k_proj', None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, 'v_proj', None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, 'out_proj', None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFGPTJMLP(keras.layers.Layer): def __init__(self, intermediate_size: int, config: GPTJConfig, **kwargs): super().__init__(**kwargs) embed_dim = config.n_embd self.fc_in = keras.layers.Dense(intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name='fc_in') self.fc_out = keras.layers.Dense(embed_dim, kernel_initializer=get_initializer(config.initializer_range), name='fc_out') self.act = get_tf_activation(config.activation_function) self.dropout = keras.layers.Dropout(config.embd_pdrop) self.embed_dim = config.n_embd self.intermediate_size = intermediate_size def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.fc_in(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.fc_out(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'fc_in', None) is not None: with tf.name_scope(self.fc_in.name): self.fc_in.build([None, None, self.embed_dim]) if getattr(self, 'fc_out', None) is not None: with tf.name_scope(self.fc_out.name): self.fc_out.build([None, None, self.intermediate_size]) class TFGPTJBlock(keras.layers.Layer): def __init__(self, config: GPTJConfig, **kwargs): super().__init__(**kwargs) inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_1') self.attn = TFGPTJAttention(config, name='attn') self.mlp = TFGPTJMLP(inner_dim, config, name='mlp') self.config = config def call(self, hidden_states: tf.Tensor, layer_past: 
tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, use_cache: bool=False, output_attentions: bool=False): residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn(hidden_states=hidden_states, layer_past=layer_past, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions) attn_output = attn_outputs[0] outputs = attn_outputs[1:] feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = attn_output + feed_forward_hidden_states + residual if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'ln_1', None) is not None: with tf.name_scope(self.ln_1.name): self.ln_1.build([None, None, self.config.n_embd]) if getattr(self, 'attn', None) is not None: with tf.name_scope(self.attn.name): self.attn.build(None) if getattr(self, 'mlp', None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) @keras_serializable class TFGPTJMainLayer(keras.layers.Layer): config_class = GPTJConfig def __init__(self, config: GPTJConfig, *inputs, **kwargs): super().__init__(*inputs, **kwargs) self.config = config self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.use_cache = config.use_cache self.return_dict = config.use_return_dict self.num_hidden_layers = config.n_layer self.n_embd = config.n_embd self.n_positions = config.n_positions self.initializer_range = config.initializer_range self.wte = TFSharedEmbeddings(config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name='wte') self.drop = keras.layers.Dropout(config.embd_pdrop) self.h = [TFGPTJBlock(config, name=f'h_._{i}') for i in range(config.n_layer)] self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_f') self.embed_dim = config.n_embd def get_input_embeddings(self): return self.wte def set_input_embeddings(self, value: tf.Tensor): self.wte.weight = value self.wte.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call(self, input_ids=None, past_key_values=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: input_shape = shape_list(input_ids) input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if past_key_values is None: past_length = 0 past_key_values = [None] * len(self.h) else: past_length = shape_list(past_key_values[0][0])[-2] if position_ids is None: position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0) if attention_mask is not None: attention_mask_shape = shape_list(attention_mask) attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])) one_cst = tf.constant(1.0) attention_mask = 
tf.cast(attention_mask, dtype=one_cst.dtype) attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0)) if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.wte.vocab_size) inputs_embeds = self.wte(input_ids, mode='embedding') if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.wte(token_type_ids, mode='embedding') else: token_type_embeds = tf.constant(0.0) token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype) hidden_states = inputs_embeds + token_type_embeds hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () if use_cache else None all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for (i, (block, layer_past)) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block(hidden_states=hidden_states, layer_past=layer_past, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, training=training) hidden_states = outputs[0] if use_cache: presents = presents + (outputs[1],) if output_attentions: all_attentions = all_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.ln_f(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if output_attentions: attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple((tf.reshape(t, attention_output_shape) for t in all_attentions)) if not return_dict: return tuple((v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)) return TFBaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'wte', None) is not None: with tf.name_scope(self.wte.name): self.wte.build(None) if getattr(self, 'ln_f', None) is not None: with tf.name_scope(self.ln_f.name): self.ln_f.build([None, None, self.embed_dim]) if getattr(self, 'h', None) is not None: for layer in self.h: with tf.name_scope(layer.name): layer.build(None) class TFGPTJPreTrainedModel(TFPreTrainedModel): config_class = GPTJConfig base_model_prefix = 'transformer' _keys_to_ignore_on_load_unexpected = ['h.\\d+.attn.bias'] GPTJ_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. 
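# [editor's note] Hedged illustration, not part of the original file: TFGPTJMainLayer.call
# above turns a 0/1 padding mask into an additive bias of shape (batch, 1, 1, seq_len) with
# large negative values at padded positions. The same arithmetic with toy values:
def _editor_sketch_additive_mask():
    import tensorflow as tf

    attention_mask = tf.constant([[1.0, 1.0, 0.0]])            # 1 = real token, 0 = padding
    mask = tf.reshape(attention_mask, (1, 1, 1, 3))            # (batch, 1, 1, seq_len)
    additive = (1.0 - mask) * -10000.0                         # 0.0 where kept, -1e4 where padded
    return additive                                            # added to raw scores before softmax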
Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Parameters:\n config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' GPTJ_INPUTS_DOCSTRING = "\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of\n input past key value states). Indices of input sequence tokens in the vocabulary.\n\n If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past_key_values (`List[tf.Tensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past` output below). Can be used to speed up sequential decoding. The token ids which have their past\n given to this model should not be passed as input ids as they have already been computed.\n attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
This argument can be used\n in eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @add_start_docstrings('The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.', GPTJ_START_DOCSTRING) class TFGPTJModel(TFGPTJPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFGPTJMainLayer(config, name='transformer') @unpack_inputs @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]: outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) @add_start_docstrings('\n The GPT-J Model transformer with a language modeling head on top.\n ', GPTJ_START_DOCSTRING) class TFGPTJForCausalLM(TFGPTJPreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFGPTJMainLayer(config, name='transformer') self.lm_head = keras.layers.Dense(config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name='lm_head') self.config = config def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): token_type_ids = kwargs.get('token_type_ids', None) if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) if token_type_ids is not None: token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1) position_ids = kwargs.get('position_ids', None) attention_mask = kwargs.get('attention_mask', None) if attention_mask is not None and position_ids is None: position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True) if past_key_values: position_ids = tf.expand_dims(position_ids[:, -1], -1) return {'input_ids': inputs, 'attention_mask': attention_mask, 'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'token_type_ids': token_type_ids} @unpack_inputs 
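# Note on `prepare_inputs_for_generation` above (illustrative comment, not part of the
# original file): when no explicit `position_ids` are passed, positions are recovered from
# the attention mask with an exclusive cumulative sum, so left padding does not shift the
# positions of the real tokens. A minimal worked example, assuming a single left-padded row:
#   attention_mask = [0, 0, 1, 1, 1]
#   tf.math.cumsum(attention_mask, axis=-1, exclusive=True) -> [0, 0, 0, 1, 2]
# i.e. the first real token gets position 0. When `past_key_values` is set, only the last
# column of `position_ids` (and of the inputs) is kept, since earlier tokens are already cached.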
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, labels: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]: transformer_outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: shifted_logits = lm_logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels, shifted_logits) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return TFCausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, 'lm_head', None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build([None, None, self.config.n_embd]) @add_start_docstrings('\n The GPT-J Model transformer with a sequence classification head on top (linear layer).\n\n [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT, GPT-2, GPT-Neo) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ', GPTJ_START_DOCSTRING) class TFGPTJForSequenceClassification(TFGPTJPreTrainedModel, TFSequenceClassificationLoss): _keys_to_ignore_on_load_missing = ['h.\\d+.attn.masked_bias', 'h.\\d+.attn.bias', 'lm_head.weight'] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFGPTJMainLayer(config, name='transformer') self.score = keras.layers.Dense(self.num_labels, use_bias=False, kernel_initializer=get_initializer(config.initializer_range), name='score') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, labels: np.ndarray | tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]: if labels is not None and self.config.pad_token_id is None and (input_ids.shape[0] != 1): raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') transformer_outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) logits_shape = shape_list(logits) in_logits = None if self.config.pad_token_id is None: sequence_lengths = -1 elif input_ids is not None: sequence_lengths = tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1) - 1 sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, tf.cast(shape_list(input_ids[-1]), sequence_lengths.dtype) - 1) in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1) else: sequence_lengths = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') loss = None if labels is not None: if not tf.is_tensor(sequence_lengths): in_logits = logits[0:logits_shape[0], sequence_lengths] loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels])) pooled_logits = in_logits if in_logits is not None else logits if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return TFSequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, 'score', None) is not None: with tf.name_scope(self.score.name): self.score.build([None, None, self.config.n_embd]) @add_start_docstrings('\n The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like\n SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', GPTJ_START_DOCSTRING) class TFGPTJForQuestionAnswering(TFGPTJPreTrainedModel, TFQuestionAnsweringLoss): _keys_to_ignore_on_load_missing = ['h.\\d+.attn.masked_bias', 'h.\\d+.attn.bias', 'lm_head.weight'] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFGPTJMainLayer(config, name='transformer') self.qa_outputs = keras.layers.Dense(self.num_labels, kernel_initializer=get_initializer(config.initializer_range), name='qa_outputs') self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, start_positions: np.ndarray | tf.Tensor | None=None, end_positions: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: transformer_outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = transformer_outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {'start_position': start_positions} labels['end_position'] = 
end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + transformer_outputs[2:] return (loss,) + output if loss is not None else output return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'transformer', None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, 'qa_outputs', None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) # File: transformers-main/src/transformers/models/granite/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_granite': ['GraniteConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_granite'] = ['GraniteForCausalLM', 'GraniteModel', 'GranitePreTrainedModel'] if TYPE_CHECKING: from .configuration_granite import GraniteConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_granite import GraniteForCausalLM, GraniteModel, GranitePreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/granite/configuration_granite.py """""" from ...configuration_utils import PretrainedConfig from ...modeling_rope_utils import rope_config_validation from ...utils import logging logger = logging.get_logger(__name__) class GraniteConfig(PretrainedConfig): model_type = 'granite' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, mlp_bias=False, embedding_multiplier=1.0, logits_scaling=1.0, residual_multiplier=1.0, attention_multiplier=1.0, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.mlp_bias = mlp_bias self.embedding_multiplier = embedding_multiplier self.logits_scaling = logits_scaling self.residual_multiplier = residual_multiplier self.attention_multiplier = attention_multiplier super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, 
tie_word_embeddings=tie_word_embeddings, **kwargs) rope_config_validation(self) # File: transformers-main/src/transformers/models/granite/modeling_granite.py from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import _flash_attention_forward from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS from ...modeling_utils import PreTrainedModel from ...pytorch_utils import ALL_LAYERNORM_LAYERS from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings from .configuration_granite import GraniteConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'GraniteConfig' def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask class GraniteRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}' ALL_LAYERNORM_LAYERS.append(GraniteRMSNorm) class GraniteRotaryEmbedding(nn.Module): def __init__(self, config: GraniteConfig): super().__init__() self.rope_kwargs = {} if config.rope_scaling is not None: self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type')) else: self.rope_type = 'default' self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] (inv_freq, self.attention_scaling) = self.rope_init_fn(self.config, device=None, **self.rope_kwargs) self.register_buffer('inv_freq', inv_freq, persistent=False) self.original_inv_freq = self.inv_freq def _dynamic_frequency_update(self, position_ids, device): seq_len = torch.max(position_ids) + 1 if seq_len > self.max_seq_len_cached: (inv_freq, self.attention_scaling) = 
self.rope_init_fn(self.config, device, seq_len=seq_len, **self.rope_kwargs) self.register_buffer('inv_freq', inv_freq, persistent=False) self.max_seq_len_cached = seq_len if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: self.register_buffer('inv_freq', self.original_inv_freq, persistent=False) self.max_seq_len_cached = self.original_max_seq_len @torch.no_grad() def forward(self, x, position_ids): if 'dynamic' in self.rope_type: self._dynamic_frequency_update(position_ids, device=x.device) inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() cos = cos * self.attention_scaling sin = sin * self.attention_scaling return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)) def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = q * cos + rotate_half(q) * sin k_embed = k * cos + rotate_half(k) * sin return (q_embed, k_embed) class GraniteMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: (batch, num_key_value_heads, slen, head_dim) = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class GraniteAttention(nn.Module): def __init__(self, config: GraniteConfig, layer_idx: Optional[int]=None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. 
Please make sure to provide a `layer_idx` when creating this class.') self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.is_causal = True self.scaling = config.attention_multiplier if self.head_dim * self.num_heads != self.hidden_size: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = position_embeddings (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, :key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class GraniteFlashAttention2(GraniteAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: 
torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: output_attentions = False (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = position_embeddings (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, softmax_scale=self.scaling, sliding_window=getattr(self, 'sliding_window', None), use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class GraniteSdpaAttention(GraniteAttention): def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: logger.warning_once('GraniteModel is using GraniteSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings) (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) (cos, sin) = position_embeddings (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, :key_states.shape[-2]] if query_states.device.type == 'cuda' and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() is_causal = True if causal_mask is None and q_len > 1 else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal, scale=self.scaling) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return (attn_output, None, past_key_value) GRANITE_ATTENTION_CLASSES = {'eager': GraniteAttention, 'flash_attention_2': GraniteFlashAttention2, 'sdpa': GraniteSdpaAttention} class GraniteDecoderLayer(nn.Module): def __init__(self, config: GraniteConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = GRANITE_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) self.mlp = GraniteMLP(config) self.input_layernorm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.residual_multiplier = config.residual_multiplier def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, 
position_embeddings=position_embeddings, **kwargs) hidden_states = residual + hidden_states * self.residual_multiplier residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states * self.residual_multiplier outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs GRANITE_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`GraniteConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('The bare Granite Model outputting raw hidden-states without any specific head on top.', GRANITE_START_DOCSTRING) class GranitePreTrainedModel(PreTrainedModel): config_class = GraniteConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['GraniteDecoderLayer'] _skip_keys_device_placement = ['past_key_values'] _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True _supports_quantized_cache = True _supports_static_cache = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() GRANITE_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. 
See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance, see our\n [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. 
It is used to update the cache in the correct position and to infer\n the complete sequence length.\n" @add_start_docstrings('The bare Granite Model outputting raw hidden-states without any specific head on top.', GRANITE_START_DOCSTRING) class GraniteModel(GranitePreTrainedModel): def __init__(self, config: GraniteConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList([GraniteDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.norm = GraniteRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False self.embedding_multiplier = config.embedding_multiplier self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.rotary_emb = GraniteRotaryEmbedding(config) self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(GRANITE_INPUTS_DOCSTRING) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.') use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) inputs_embeds = inputs_embeds * self.embedding_multiplier return_legacy_cache = False if use_cache and (not isinstance(past_key_values, Cache)): return_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) logger.warning_once('We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. 
Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)') if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings) else: layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if return_legacy_cache: next_cache = next_cache.to_legacy_cache() if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns) def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) 
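# Illustrative comment (not part of the original file): at this point `causal_mask` holds
# `min_dtype` wherever key position j lies in the future of query row i, and 0 elsewhere.
# A minimal worked example, assuming sequence_length=2, target_length=5 and
# cache_position=[3, 4] (three tokens already cached, two new queries):
#   after torch.triu(..., diagonal=1):          row 0 -> [0, m, m, m, m]
#                                               row 1 -> [0, 0, m, m, m]
#   after `*= arange(5) > cache_position^T`:    row 0 -> [0, 0, 0, 0, m]
#                                               row 1 -> [0, 0, 0, 0, 0]
# (m = min_dtype). The query at absolute position 3 is blocked only from position 4, and the
# query at position 4 may attend to everything, which is the expected causal pattern.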
causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions): causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask class GraniteForCausalLM(GranitePreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.model = GraniteModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(GRANITE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] logits = self.lm_head(hidden_states) logits = logits / self.config.logits_scaling logits = logits.float() loss = None if labels is not None: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, 
position_ids=None, use_cache=True, **kwargs): if past_key_values is not None: if inputs_embeds is not None: input_ids = input_ids[:, -cache_position.shape[0]:] elif input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] position_ids = position_ids.clone(memory_format=torch.contiguous_format) if inputs_embeds is not None and cache_position[0] == 0: model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None} else: model_inputs = {'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None} if isinstance(past_key_values, StaticCache) and attention_mask.ndim == 2: if model_inputs['inputs_embeds'] is not None: (batch_size, sequence_length, _) = model_inputs['inputs_embeds'].shape device = model_inputs['inputs_embeds'].device else: (batch_size, sequence_length) = model_inputs['input_ids'].shape device = model_inputs['input_ids'].device dtype = self.lm_head.weight.dtype min_dtype = torch.finfo(dtype).min attention_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=past_key_values.get_max_length(), dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=batch_size) model_inputs.update({'position_ids': position_ids, 'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask}) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/grounding_dino/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_grounding_dino': ['GroundingDinoConfig'], 'processing_grounding_dino': ['GroundingDinoProcessor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_grounding_dino'] = ['GroundingDinoForObjectDetection', 'GroundingDinoModel', 'GroundingDinoPreTrainedModel'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_grounding_dino'] = ['GroundingDinoImageProcessor'] if TYPE_CHECKING: from .configuration_grounding_dino import GroundingDinoConfig from .processing_grounding_dino import GroundingDinoProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_grounding_dino import GroundingDinoForObjectDetection, GroundingDinoModel, GroundingDinoPreTrainedModel try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_grounding_dino import GroundingDinoImageProcessor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: 
transformers-main/src/transformers/models/grounding_dino/configuration_grounding_dino.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class GroundingDinoConfig(PretrainedConfig): model_type = 'grounding-dino' attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads'} def __init__(self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, text_config=None, num_queries=900, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, auxiliary_loss=False, position_embedding_type='sine', num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=True, class_cost=1.0, bbox_cost=5.0, giou_cost=2.0, bbox_loss_coefficient=5.0, giou_loss_coefficient=2.0, focal_alpha=0.25, disable_custom_kernels=False, max_text_len=256, text_enhancer_dropout=0.0, fusion_droppath=0.1, fusion_dropout=0.0, embedding_init_target=True, query_dim=4, decoder_bbox_embed_share=True, two_stage_bbox_embed_share=False, positional_embedding_temperature=20, init_std=0.02, layer_norm_eps=1e-05, **kwargs): if backbone_config is None and backbone is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.') backbone_config = CONFIG_MAPPING['swin'](window_size=7, image_size=224, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], out_indices=[2, 3, 4]) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.pop('model_type') config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs) if text_config is None: text_config = {} logger.info('text_config is None. 
Initializing the text config with default values (`BertConfig`).') self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points self.two_stage = two_stage self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.focal_alpha = focal_alpha self.disable_custom_kernels = disable_custom_kernels if isinstance(text_config, dict): text_config['model_type'] = text_config['model_type'] if 'model_type' in text_config else 'bert' text_config = CONFIG_MAPPING[text_config['model_type']](**text_config) elif text_config is None: text_config = CONFIG_MAPPING['bert']() self.text_config = text_config self.max_text_len = max_text_len self.text_enhancer_dropout = text_enhancer_dropout self.fusion_droppath = fusion_droppath self.fusion_dropout = fusion_dropout self.embedding_init_target = embedding_init_target self.query_dim = query_dim self.decoder_bbox_embed_share = decoder_bbox_embed_share self.two_stage_bbox_embed_share = two_stage_bbox_embed_share if two_stage_bbox_embed_share and (not decoder_bbox_embed_share): raise ValueError('If two_stage_bbox_embed_share is True, decoder_bbox_embed_share must be True.') self.positional_embedding_temperature = positional_embedding_temperature self.init_std = init_std self.layer_norm_eps = layer_norm_eps super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model # File: transformers-main/src/transformers/models/grounding_dino/convert_grounding_dino_to_hf.py """""" import argparse import requests import torch from PIL import Image from torchvision import transforms as T from transformers import AutoTokenizer, GroundingDinoConfig, GroundingDinoForObjectDetection, GroundingDinoImageProcessor, GroundingDinoProcessor, SwinConfig IMAGENET_MEAN = [0.485, 0.456, 0.406] IMAGENET_STD = [0.229, 0.224, 0.225] def get_grounding_dino_config(model_name): if 'tiny' in model_name: window_size = 7 embed_dim = 96 depths = (2, 2, 6, 2) num_heads = (3, 6, 12, 24) image_size = 224 elif 'base' in model_name: window_size = 12 embed_dim = 128 depths = (2, 2, 18, 2) num_heads = (4, 8, 16, 32) image_size = 384 else: raise ValueError('Model not supported, only supports base and large variants') backbone_config = SwinConfig(window_size=window_size, image_size=image_size, embed_dim=embed_dim, depths=depths, num_heads=num_heads, out_indices=[2, 3, 4]) config = GroundingDinoConfig(backbone_config=backbone_config) return config def create_rename_keys(state_dict, config): rename_keys = [] 
rename_keys.append(('backbone.0.patch_embed.proj.weight', 'model.backbone.conv_encoder.model.embeddings.patch_embeddings.projection.weight')) rename_keys.append(('backbone.0.patch_embed.proj.bias', 'model.backbone.conv_encoder.model.embeddings.patch_embeddings.projection.bias')) rename_keys.append(('backbone.0.patch_embed.norm.weight', 'model.backbone.conv_encoder.model.embeddings.norm.weight')) rename_keys.append(('backbone.0.patch_embed.norm.bias', 'model.backbone.conv_encoder.model.embeddings.norm.bias')) for (layer, depth) in enumerate(config.backbone_config.depths): for block in range(depth): rename_keys.append((f'backbone.0.layers.{layer}.blocks.{block}.norm1.weight', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_before.weight')) rename_keys.append((f'backbone.0.layers.{layer}.blocks.{block}.norm1.bias', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_before.bias')) rename_keys.append((f'backbone.0.layers.{layer}.blocks.{block}.norm2.weight', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_after.weight')) rename_keys.append((f'backbone.0.layers.{layer}.blocks.{block}.norm2.bias', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.layernorm_after.bias')) rename_keys.append((f'backbone.0.layers.{layer}.blocks.{block}.attn.relative_position_bias_table', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.relative_position_bias_table')) rename_keys.append((f'backbone.0.layers.{layer}.blocks.{block}.attn.proj.weight', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.output.dense.weight')) rename_keys.append((f'backbone.0.layers.{layer}.blocks.{block}.attn.proj.bias', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.output.dense.bias')) rename_keys.append((f'backbone.0.layers.{layer}.blocks.{block}.mlp.fc1.weight', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.intermediate.dense.weight')) rename_keys.append((f'backbone.0.layers.{layer}.blocks.{block}.mlp.fc1.bias', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.intermediate.dense.bias')) rename_keys.append((f'backbone.0.layers.{layer}.blocks.{block}.mlp.fc2.weight', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.output.dense.weight')) rename_keys.append((f'backbone.0.layers.{layer}.blocks.{block}.mlp.fc2.bias', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.output.dense.bias')) if layer != len(config.backbone_config.depths) - 1: rename_keys.append((f'backbone.0.layers.{layer}.downsample.reduction.weight', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.downsample.reduction.weight')) rename_keys.append((f'backbone.0.layers.{layer}.downsample.norm.weight', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.downsample.norm.weight')) rename_keys.append((f'backbone.0.layers.{layer}.downsample.norm.bias', f'model.backbone.conv_encoder.model.encoder.layers.{layer}.downsample.norm.bias')) for out_indice in config.backbone_config.out_indices: rename_keys.append((f'backbone.0.norm{out_indice - 1}.weight', f'model.backbone.conv_encoder.model.hidden_states_norms.stage{out_indice}.weight')) rename_keys.append((f'backbone.0.norm{out_indice - 1}.bias', f'model.backbone.conv_encoder.model.hidden_states_norms.stage{out_indice}.bias')) deformable_key_mappings = 
{'self_attn.sampling_offsets.weight': 'deformable_layer.self_attn.sampling_offsets.weight', 'self_attn.sampling_offsets.bias': 'deformable_layer.self_attn.sampling_offsets.bias', 'self_attn.attention_weights.weight': 'deformable_layer.self_attn.attention_weights.weight', 'self_attn.attention_weights.bias': 'deformable_layer.self_attn.attention_weights.bias', 'self_attn.value_proj.weight': 'deformable_layer.self_attn.value_proj.weight', 'self_attn.value_proj.bias': 'deformable_layer.self_attn.value_proj.bias', 'self_attn.output_proj.weight': 'deformable_layer.self_attn.output_proj.weight', 'self_attn.output_proj.bias': 'deformable_layer.self_attn.output_proj.bias', 'norm1.weight': 'deformable_layer.self_attn_layer_norm.weight', 'norm1.bias': 'deformable_layer.self_attn_layer_norm.bias', 'linear1.weight': 'deformable_layer.fc1.weight', 'linear1.bias': 'deformable_layer.fc1.bias', 'linear2.weight': 'deformable_layer.fc2.weight', 'linear2.bias': 'deformable_layer.fc2.bias', 'norm2.weight': 'deformable_layer.final_layer_norm.weight', 'norm2.bias': 'deformable_layer.final_layer_norm.bias'} text_enhancer_key_mappings = {'self_attn.in_proj_weight': 'text_enhancer_layer.self_attn.in_proj_weight', 'self_attn.in_proj_bias': 'text_enhancer_layer.self_attn.in_proj_bias', 'self_attn.out_proj.weight': 'text_enhancer_layer.self_attn.out_proj.weight', 'self_attn.out_proj.bias': 'text_enhancer_layer.self_attn.out_proj.bias', 'linear1.weight': 'text_enhancer_layer.fc1.weight', 'linear1.bias': 'text_enhancer_layer.fc1.bias', 'linear2.weight': 'text_enhancer_layer.fc2.weight', 'linear2.bias': 'text_enhancer_layer.fc2.bias', 'norm1.weight': 'text_enhancer_layer.layer_norm_before.weight', 'norm1.bias': 'text_enhancer_layer.layer_norm_before.bias', 'norm2.weight': 'text_enhancer_layer.layer_norm_after.weight', 'norm2.bias': 'text_enhancer_layer.layer_norm_after.bias'} fusion_key_mappings = {'gamma_v': 'fusion_layer.vision_param', 'gamma_l': 'fusion_layer.text_param', 'layer_norm_v.weight': 'fusion_layer.layer_norm_vision.weight', 'layer_norm_v.bias': 'fusion_layer.layer_norm_vision.bias', 'layer_norm_l.weight': 'fusion_layer.layer_norm_text.weight', 'layer_norm_l.bias': 'fusion_layer.layer_norm_text.bias', 'attn.v_proj.weight': 'fusion_layer.attn.vision_proj.weight', 'attn.v_proj.bias': 'fusion_layer.attn.vision_proj.bias', 'attn.l_proj.weight': 'fusion_layer.attn.text_proj.weight', 'attn.l_proj.bias': 'fusion_layer.attn.text_proj.bias', 'attn.values_v_proj.weight': 'fusion_layer.attn.values_vision_proj.weight', 'attn.values_v_proj.bias': 'fusion_layer.attn.values_vision_proj.bias', 'attn.values_l_proj.weight': 'fusion_layer.attn.values_text_proj.weight', 'attn.values_l_proj.bias': 'fusion_layer.attn.values_text_proj.bias', 'attn.out_v_proj.weight': 'fusion_layer.attn.out_vision_proj.weight', 'attn.out_v_proj.bias': 'fusion_layer.attn.out_vision_proj.bias', 'attn.out_l_proj.weight': 'fusion_layer.attn.out_text_proj.weight', 'attn.out_l_proj.bias': 'fusion_layer.attn.out_text_proj.bias'} for layer in range(config.encoder_layers): for (src, dest) in deformable_key_mappings.items(): rename_keys.append((f'transformer.encoder.layers.{layer}.{src}', f'model.encoder.layers.{layer}.{dest}')) for (src, dest) in text_enhancer_key_mappings.items(): rename_keys.append((f'transformer.encoder.text_layers.{layer}.{src}', f'model.encoder.layers.{layer}.{dest}')) for (src, dest) in fusion_key_mappings.items(): rename_keys.append((f'transformer.encoder.fusion_layers.{layer}.{src}', f'model.encoder.layers.{layer}.{dest}')) 
key_mappings_decoder = {'cross_attn.sampling_offsets.weight': 'encoder_attn.sampling_offsets.weight', 'cross_attn.sampling_offsets.bias': 'encoder_attn.sampling_offsets.bias', 'cross_attn.attention_weights.weight': 'encoder_attn.attention_weights.weight', 'cross_attn.attention_weights.bias': 'encoder_attn.attention_weights.bias', 'cross_attn.value_proj.weight': 'encoder_attn.value_proj.weight', 'cross_attn.value_proj.bias': 'encoder_attn.value_proj.bias', 'cross_attn.output_proj.weight': 'encoder_attn.output_proj.weight', 'cross_attn.output_proj.bias': 'encoder_attn.output_proj.bias', 'norm1.weight': 'encoder_attn_layer_norm.weight', 'norm1.bias': 'encoder_attn_layer_norm.bias', 'ca_text.in_proj_weight': 'encoder_attn_text.in_proj_weight', 'ca_text.in_proj_bias': 'encoder_attn_text.in_proj_bias', 'ca_text.out_proj.weight': 'encoder_attn_text.out_proj.weight', 'ca_text.out_proj.bias': 'encoder_attn_text.out_proj.bias', 'catext_norm.weight': 'encoder_attn_text_layer_norm.weight', 'catext_norm.bias': 'encoder_attn_text_layer_norm.bias', 'self_attn.in_proj_weight': 'self_attn.in_proj_weight', 'self_attn.in_proj_bias': 'self_attn.in_proj_bias', 'self_attn.out_proj.weight': 'self_attn.out_proj.weight', 'self_attn.out_proj.bias': 'self_attn.out_proj.bias', 'norm2.weight': 'self_attn_layer_norm.weight', 'norm2.bias': 'self_attn_layer_norm.bias', 'linear1.weight': 'fc1.weight', 'linear1.bias': 'fc1.bias', 'linear2.weight': 'fc2.weight', 'linear2.bias': 'fc2.bias', 'norm3.weight': 'final_layer_norm.weight', 'norm3.bias': 'final_layer_norm.bias'} for layer_num in range(config.decoder_layers): source_prefix_decoder = f'transformer.decoder.layers.{layer_num}.' target_prefix_decoder = f'model.decoder.layers.{layer_num}.' for (source_name, target_name) in key_mappings_decoder.items(): rename_keys.append((source_prefix_decoder + source_name, target_prefix_decoder + target_name)) for (layer_name, params) in state_dict.items(): if 'bert' in layer_name: rename_keys.append((layer_name, layer_name.replace('bert', 'model.text_backbone'))) if 'input_proj' in layer_name: rename_keys.append((layer_name, layer_name.replace('input_proj', 'model.input_proj_vision'))) if 'feat_map' in layer_name: rename_keys.append((layer_name, layer_name.replace('feat_map', 'model.text_projection'))) if 'transformer.decoder.ref_point_head' in layer_name: rename_keys.append((layer_name, layer_name.replace('transformer.decoder.ref_point_head', 'model.decoder.reference_points_head'))) if 'transformer.decoder.bbox_embed' in layer_name: rename_keys.append((layer_name, layer_name.replace('transformer.decoder.bbox_embed', 'model.decoder.bbox_embed'))) if 'transformer.enc_output' in layer_name: rename_keys.append((layer_name, layer_name.replace('transformer', 'model'))) if 'transformer.enc_out_bbox_embed' in layer_name: rename_keys.append((layer_name, layer_name.replace('transformer.enc_out_bbox_embed', 'model.encoder_output_bbox_embed'))) rename_keys.append(('transformer.level_embed', 'model.level_embed')) rename_keys.append(('transformer.decoder.norm.weight', 'model.decoder.layer_norm.weight')) rename_keys.append(('transformer.decoder.norm.bias', 'model.decoder.layer_norm.bias')) rename_keys.append(('transformer.tgt_embed.weight', 'model.query_position_embeddings.weight')) return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def read_in_q_k_v_encoder(state_dict, config): embed_dim = config.backbone_config.embed_dim for (layer, depth) in enumerate(config.backbone_config.depths): hidden_size = embed_dim * 2 ** 
layer for block in range(depth): in_proj_weight = state_dict.pop(f'backbone.0.layers.{layer}.blocks.{block}.attn.qkv.weight') in_proj_bias = state_dict.pop(f'backbone.0.layers.{layer}.blocks.{block}.attn.qkv.bias') state_dict[f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.query.weight'] = in_proj_weight[:hidden_size, :] state_dict[f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.query.bias'] = in_proj_bias[:hidden_size] state_dict[f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.key.weight'] = in_proj_weight[hidden_size:hidden_size * 2, :] state_dict[f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.key.bias'] = in_proj_bias[hidden_size:hidden_size * 2] state_dict[f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.value.weight'] = in_proj_weight[-hidden_size:, :] state_dict[f'model.backbone.conv_encoder.model.encoder.layers.{layer}.blocks.{block}.attention.self.value.bias'] = in_proj_bias[-hidden_size:] def read_in_q_k_v_text_enhancer(state_dict, config): hidden_size = config.hidden_size for idx in range(config.encoder_layers): in_proj_weight = state_dict.pop(f'model.encoder.layers.{idx}.text_enhancer_layer.self_attn.in_proj_weight') in_proj_bias = state_dict.pop(f'model.encoder.layers.{idx}.text_enhancer_layer.self_attn.in_proj_bias') state_dict[f'model.encoder.layers.{idx}.text_enhancer_layer.self_attn.query.weight'] = in_proj_weight[:hidden_size, :] state_dict[f'model.encoder.layers.{idx}.text_enhancer_layer.self_attn.query.bias'] = in_proj_bias[:hidden_size] state_dict[f'model.encoder.layers.{idx}.text_enhancer_layer.self_attn.key.weight'] = in_proj_weight[hidden_size:hidden_size * 2, :] state_dict[f'model.encoder.layers.{idx}.text_enhancer_layer.self_attn.key.bias'] = in_proj_bias[hidden_size:hidden_size * 2] state_dict[f'model.encoder.layers.{idx}.text_enhancer_layer.self_attn.value.weight'] = in_proj_weight[-hidden_size:, :] state_dict[f'model.encoder.layers.{idx}.text_enhancer_layer.self_attn.value.bias'] = in_proj_bias[-hidden_size:] def read_in_q_k_v_decoder(state_dict, config): hidden_size = config.hidden_size for idx in range(config.decoder_layers): in_proj_weight = state_dict.pop(f'model.decoder.layers.{idx}.self_attn.in_proj_weight') in_proj_bias = state_dict.pop(f'model.decoder.layers.{idx}.self_attn.in_proj_bias') state_dict[f'model.decoder.layers.{idx}.self_attn.query.weight'] = in_proj_weight[:hidden_size, :] state_dict[f'model.decoder.layers.{idx}.self_attn.query.bias'] = in_proj_bias[:hidden_size] state_dict[f'model.decoder.layers.{idx}.self_attn.key.weight'] = in_proj_weight[hidden_size:hidden_size * 2, :] state_dict[f'model.decoder.layers.{idx}.self_attn.key.bias'] = in_proj_bias[hidden_size:hidden_size * 2] state_dict[f'model.decoder.layers.{idx}.self_attn.value.weight'] = in_proj_weight[-hidden_size:, :] state_dict[f'model.decoder.layers.{idx}.self_attn.value.bias'] = in_proj_bias[-hidden_size:] in_proj_weight = state_dict.pop(f'model.decoder.layers.{idx}.encoder_attn_text.in_proj_weight') in_proj_bias = state_dict.pop(f'model.decoder.layers.{idx}.encoder_attn_text.in_proj_bias') state_dict[f'model.decoder.layers.{idx}.encoder_attn_text.query.weight'] = in_proj_weight[:hidden_size, :] state_dict[f'model.decoder.layers.{idx}.encoder_attn_text.query.bias'] = in_proj_bias[:hidden_size] state_dict[f'model.decoder.layers.{idx}.encoder_attn_text.key.weight'] = 
in_proj_weight[hidden_size:hidden_size * 2, :] state_dict[f'model.decoder.layers.{idx}.encoder_attn_text.key.bias'] = in_proj_bias[hidden_size:hidden_size * 2] state_dict[f'model.decoder.layers.{idx}.encoder_attn_text.value.weight'] = in_proj_weight[-hidden_size:, :] state_dict[f'model.decoder.layers.{idx}.encoder_attn_text.value.bias'] = in_proj_bias[-hidden_size:] def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw).convert('RGB') return image def preprocess_caption(caption: str) -> str: result = caption.lower().strip() if result.endswith('.'): return result return result + '.' @torch.no_grad() def convert_grounding_dino_checkpoint(args): model_name = args.model_name pytorch_dump_folder_path = args.pytorch_dump_folder_path push_to_hub = args.push_to_hub verify_logits = args.verify_logits checkpoint_mapping = {'grounding-dino-tiny': 'https://huggingface.co/ShilongLiu/GroundingDino/resolve/main/groundingdino_swint_ogc.pth', 'grounding-dino-base': 'https://huggingface.co/ShilongLiu/GroundingDino/resolve/main/groundingdino_swinb_cogcoor.pth'} config = get_grounding_dino_config(model_name) checkpoint_url = checkpoint_mapping[model_name] original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model'] original_state_dict = {k.replace('module.', ''): v for (k, v) in original_state_dict.items()} for (name, param) in original_state_dict.items(): print(name, param.shape) new_state_dict = original_state_dict.copy() rename_keys = create_rename_keys(original_state_dict, config) for (src, dest) in rename_keys: rename_key(new_state_dict, src, dest) read_in_q_k_v_encoder(new_state_dict, config) read_in_q_k_v_text_enhancer(new_state_dict, config) read_in_q_k_v_decoder(new_state_dict, config) model = GroundingDinoForObjectDetection(config) model.eval() (missing_keys, unexpected_keys) = model.load_state_dict(new_state_dict, strict=False) print('Missing keys:', missing_keys) print('Unexpected keys:', unexpected_keys) image = prepare_img() transforms = T.Compose([T.Resize(size=800, max_size=1333), T.ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) original_pixel_values = transforms(image).unsqueeze(0) image_processor = GroundingDinoImageProcessor() tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') processor = GroundingDinoProcessor(image_processor=image_processor, tokenizer=tokenizer) text = 'a cat' inputs = processor(images=image, text=preprocess_caption(text), return_tensors='pt') assert torch.allclose(original_pixel_values, inputs.pixel_values, atol=0.0001) if verify_logits: with torch.no_grad(): outputs = model(**inputs) print(outputs.logits[0, :3, :3]) expected_slice = torch.tensor([[-4.8913, -0.19, -0.2161], [-4.9653, -0.3719, -0.395], [-5.9599, -3.3765, -3.3104]]) assert torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=0.0001) print('Looks ok!') if pytorch_dump_folder_path is not None: model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub(f'EduardoPacheco/{model_name}') processor.push_to_hub(f'EduardoPacheco/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='grounding-dino-tiny', type=str, choices=['grounding-dino-tiny', 'grounding-dino-base'], help="Name of the GroundingDino model you'd like to convert.") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the 
output PyTorch model directory.') parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.') parser.add_argument('--verify_logits', action='store_false', help='Whether or not to verify logits after conversion.') args = parser.parse_args() convert_grounding_dino_checkpoint(args) # File: transformers-main/src/transformers/models/grounding_dino/image_processing_grounding_dino.py """""" import io import pathlib from collections import defaultdict from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_processing_utils import BaseImageProcessor, get_size_dict from ...image_transforms import PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, pad, rescale, resize, rgb_to_id, to_channel_dimension_format from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_annotations, validate_kwargs, validate_preprocess_arguments from ...utils import ExplicitEnum, TensorType, is_flax_available, is_jax_tensor, is_scipy_available, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_vision_available, logging if is_torch_available(): import torch from torch import nn if is_vision_available(): import PIL if is_scipy_available(): import scipy.special import scipy.stats logger = logging.get_logger(__name__) AnnotationType = Dict[str, Union[int, str, List[Dict]]] class AnnotationFormat(ExplicitEnum): COCO_DETECTION = 'coco_detection' COCO_PANOPTIC = 'coco_panoptic' SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: (height, width) = image_size raw_size = None if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: raw_size = max_size * min_original_size / max_original_size size = int(round(raw_size)) if height <= width and height == size or (width <= height and width == size): (oh, ow) = (height, width) elif width < height: ow = size if max_size is not None and raw_size is not None: oh = int(raw_size * height / width) else: oh = int(size * height / width) else: oh = size if max_size is not None and raw_size is not None: ow = int(raw_size * width / height) else: ow = int(size * width / height) return (oh, ow) def get_resize_output_image_size(input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]: image_size = get_image_size(input_image, input_data_format) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) def get_image_size_for_max_height_width(input_image: np.ndarray, max_height: int, max_width: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]: image_size = get_image_size(input_image, input_data_format) (height, width) = image_size height_scale = max_height / height width_scale = max_width / width min_scale = min(height_scale, width_scale) new_height = int(height * min_scale) new_width = int(width * min_scale) return (new_height, new_width) 
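# --- Illustrative sketch (added commentary, not part of the original transformers file) ---
# The two helpers above choose an output resolution either by scaling the shortest edge to
# `size` while capping the longest edge at `max_size`, or by fitting the image inside a
# max_height x max_width box. The 480x640 (height, width) input below is an arbitrary example.
def _example_resize_size_selection():
    # Shortest edge 480 -> 800; the longest edge becomes int(640 * 800 / 480) = 1066, which stays <= 1333.
    assert get_size_with_aspect_ratio((480, 640), size=800, max_size=1333) == (800, 1066)
    # Fit inside a 1000x1000 box: the common scale is min(1000 / 480, 1000 / 640) = 1.5625,
    # so (480, 640) becomes (750, 1000).
    image = np.zeros((3, 480, 640))  # channels-first dummy image
    assert get_image_size_for_max_height_width(image, max_height=1000, max_width=1000) == (750, 1000)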
def get_numpy_to_framework_fn(arr) -> Callable: if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f'Cannot convert arrays of type {type(arr)}') def safe_squeeze(arr: np.ndarray, axis: Optional[int]=None) -> np.ndarray: if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: (image_height, image_width) = image_size norm_annotation = {} for (key, value) in annotation.items(): if key == 'boxes': boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation def max_across_indices(values: Iterable[Any]) -> List[Any]: return [max(values_i) for values_i in zip(*values)] def get_max_height_width(images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> List[int]: if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if input_data_format == ChannelDimension.FIRST: (_, max_height, max_width) = max_across_indices([img.shape for img in images]) elif input_data_format == ChannelDimension.LAST: (max_height, max_width, _) = max_across_indices([img.shape for img in images]) else: raise ValueError(f'Invalid channel dimension format: {input_data_format}') return (max_height, max_width) def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: (input_height, input_width) = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: try: from pycocotools import mask as coco_mask except ImportError: raise ImportError('Pycocotools is not installed in your environment.') masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool=False, input_data_format: Optional[Union[ChannelDimension, str]]=None): (image_height, image_width) = get_image_size(image, channel_dim=input_data_format) image_id = target['image_id'] image_id = np.asarray([image_id], dtype=np.int64) annotations = target['annotations'] annotations = [obj for obj in annotations if 'iscrowd' not in obj or obj['iscrowd'] == 0] classes = [obj['category_id'] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) area = np.asarray([obj['area'] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj['iscrowd'] if 'iscrowd' in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj['bbox'] for obj in annotations] boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 
0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target['image_id'] = image_id new_target['class_labels'] = classes[keep] new_target['boxes'] = boxes[keep] new_target['area'] = area[keep] new_target['iscrowd'] = iscrowd[keep] new_target['orig_size'] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and 'keypoints' in annotations[0]: keypoints = [obj['keypoints'] for obj in annotations] keypoints = np.asarray(keypoints, dtype=np.float32) keypoints = keypoints[keep] num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target['keypoints'] = keypoints if return_segmentation_masks: segmentation_masks = [obj['segmentation'] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target['masks'] = masks[keep] return new_target def masks_to_boxes(masks: np.ndarray) -> np.ndarray: if masks.size == 0: return np.zeros((0, 4)) (h, w) = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) (y, x) = np.meshgrid(y, x, indexing='ij') x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~np.array(masks, dtype=bool)) x_min = x.filled(fill_value=100000000.0) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~np.array(masks, dtype=bool)) y_min = y.filled(fill_value=100000000.0) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) def prepare_coco_panoptic_annotation(image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool=True, input_data_format: Union[ChannelDimension, str]=None) -> Dict: (image_height, image_width) = get_image_size(image, channel_dim=input_data_format) annotation_path = pathlib.Path(masks_path) / target['file_name'] new_target = {} new_target['image_id'] = np.asarray([target['image_id'] if 'image_id' in target else target['id']], dtype=np.int64) new_target['size'] = np.asarray([image_height, image_width], dtype=np.int64) new_target['orig_size'] = np.asarray([image_height, image_width], dtype=np.int64) if 'segments_info' in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info['id'] for segment_info in target['segments_info']]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target['masks'] = masks new_target['boxes'] = masks_to_boxes(masks) new_target['class_labels'] = np.array([segment_info['category_id'] for segment_info in target['segments_info']], dtype=np.int64) new_target['iscrowd'] = np.asarray([segment_info['iscrowd'] for segment_info in target['segments_info']], dtype=np.int64) new_target['area'] = np.asarray([segment_info['area'] for segment_info in target['segments_info']], dtype=np.float32) return new_target def get_segmentation_image(masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False): (h, w) = input_size (final_h, final_w) = target_size m_id = scipy.special.softmax(masks.transpose(0, 1), -1) if m_id.shape[-1] == 0: m_id = np.zeros((h, w), dtype=np.int64) else: m_id = m_id.argmax(-1).reshape(h, w) if deduplicate: for equiv in 
stuff_equiv_classes.values(): for eq_id in equiv: m_id[m_id == eq_id] = equiv[0] seg_img = id_to_rgb(m_id) seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST) return seg_img def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray: (final_h, final_w) = target_size np_seg_img = seg_img.astype(np.uint8) np_seg_img = np_seg_img.reshape(final_h, final_w, 3) m_id = rgb_to_id(np_seg_img) area = [(m_id == i).sum() for i in range(n_classes)] return area def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: probs = scipy.special.softmax(logits, axis=-1) labels = probs.argmax(-1, keepdims=True) scores = np.take_along_axis(probs, labels, axis=-1) (scores, labels) = (scores.squeeze(-1), labels.squeeze(-1)) return (scores, labels) def post_process_panoptic_sample(out_logits: np.ndarray, masks: np.ndarray, boxes: np.ndarray, processed_size: Tuple[int, int], target_size: Tuple[int, int], is_thing_map: Dict, threshold=0.85) -> Dict: (scores, labels) = score_labels_from_class_probabilities(out_logits) keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold) cur_scores = scores[keep] cur_classes = labels[keep] cur_boxes = center_to_corners_format(boxes[keep]) if len(cur_boxes) != len(cur_classes): raise ValueError('Not as many boxes as there are classes') cur_masks = masks[keep] cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR) cur_masks = safe_squeeze(cur_masks, 1) (b, h, w) = cur_masks.shape cur_masks = cur_masks.reshape(b, -1) stuff_equiv_classes = defaultdict(list) for (k, label) in enumerate(cur_classes): if not is_thing_map[label]: stuff_equiv_classes[label].append(k) seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores)) if cur_classes.size() > 0: filtered_small = np.array([a <= 4 for a in area], dtype=bool) while filtered_small.any(): cur_masks = cur_masks[~filtered_small] cur_scores = cur_scores[~filtered_small] cur_classes = cur_classes[~filtered_small] seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores)) filtered_small = np.array([a <= 4 for a in area], dtype=bool) else: cur_classes = np.ones((1, 1), dtype=np.int64) segments_info = [{'id': i, 'isthing': is_thing_map[cat], 'category_id': int(cat), 'area': a} for (i, (cat, a)) in enumerate(zip(cur_classes, area))] del cur_classes with io.BytesIO() as out: PIL.Image.fromarray(seg_img).save(out, format='PNG') predictions = {'png_string': out.getvalue(), 'segments_info': segments_info} return predictions def resize_annotation(annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, resample: PILImageResampling=PILImageResampling.NEAREST): ratios = tuple((float(s) / float(s_orig) for (s, s_orig) in zip(target_size, orig_size))) (ratio_height, ratio_width) = ratios new_annotation = {} new_annotation['size'] = target_size for (key, value) in annotation.items(): if key == 'boxes': boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation['boxes'] = scaled_boxes elif key == 'area': area = value scaled_area = area * (ratio_width * ratio_height) new_annotation['area'] = scaled_area elif key == 'masks': masks = 
value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation['masks'] = masks elif key == 'size': new_annotation['size'] = target_size else: new_annotation[key] = value return new_annotation def binary_mask_to_rle(mask): if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) def convert_segmentation_to_rle(segmentation): segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): if not masks.shape[0] == scores.shape[0] == labels.shape[0]: raise ValueError('mask, scores and labels must have the same shape!') to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return (masks[to_keep], scores[to_keep], labels[to_keep]) def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): mask_k = mask_labels == k mask_k_area = mask_k.sum() original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return (mask_exists, mask_k) def compute_segments(mask_probs, pred_scores, pred_labels, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[Set[int]]=None, target_size: Tuple[int, int]=None): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate(mask_probs.unsqueeze(0), size=target_size, mode='bilinear', align_corners=False)[0] current_segment_id = 0 mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse (mask_exists, mask_k) = check_segment_validity(mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append({'id': current_segment_id, 'label_id': pred_class, 'was_fused': should_fuse, 'score': segment_score}) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return (segmentation, segments) class GroundingDinoImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values', 'pixel_mask'] def __init__(self, format: Union[str, AnnotationFormat]=AnnotationFormat.COCO_DETECTION, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Union[float, List[float]]=None, image_std: Union[float, List[float]]=None, do_convert_annotations: Optional[bool]=None, 
do_pad: bool=True, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> None: if 'pad_and_return_pixel_mask' in kwargs: do_pad = kwargs.pop('pad_and_return_pixel_mask') if 'max_size' in kwargs: logger.warning_once("The `max_size` parameter is deprecated and will be removed in v4.26. Please specify in `size['longest_edge'] instead`.") max_size = kwargs.pop('max_size') else: max_size = None if size is None else 1333 size = size if size is not None else {'shortest_edge': 800, 'longest_edge': 1333} size = get_size_dict(size, max_size=max_size, default_to_square=False) if do_convert_annotations is None: do_convert_annotations = do_normalize super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.do_convert_annotations = do_convert_annotations self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad self.pad_size = pad_size self._valid_processor_keys = ['images', 'annotations', 'return_segmentation_masks', 'masks_path', 'do_resize', 'size', 'resample', 'do_rescale', 'rescale_factor', 'do_normalize', 'do_convert_annotations', 'image_mean', 'image_std', 'do_pad', 'pad_size', 'format', 'return_tensors', 'data_format', 'input_data_format'] @classmethod def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): image_processor_dict = image_processor_dict.copy() if 'max_size' in kwargs: image_processor_dict['max_size'] = kwargs.pop('max_size') if 'pad_and_return_pixel_mask' in kwargs: image_processor_dict['pad_and_return_pixel_mask'] = kwargs.pop('pad_and_return_pixel_mask') return super().from_dict(image_processor_dict, **kwargs) def prepare_annotation(self, image: np.ndarray, target: Dict, format: Optional[AnnotationFormat]=None, return_segmentation_masks: bool=None, masks_path: Optional[Union[str, pathlib.Path]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Dict: format = format if format is not None else self.format if format == AnnotationFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation(image, target, return_segmentation_masks, input_data_format=input_data_format) elif format == AnnotationFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation(image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format) else: raise ValueError(f'Format {format} is not supported.') return target def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: if 'max_size' in kwargs: logger.warning_once("The `max_size` parameter is deprecated and will be removed in v4.26. 
Please specify in `size['longest_edge'] instead`.") max_size = kwargs.pop('max_size') else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if 'shortest_edge' in size and 'longest_edge' in size: new_size = get_resize_output_image_size(image, size['shortest_edge'], size['longest_edge'], input_data_format=input_data_format) elif 'max_height' in size and 'max_width' in size: new_size = get_image_size_for_max_height_width(image, size['max_height'], size['max_width'], input_data_format=input_data_format) elif 'height' in size and 'width' in size: new_size = (size['height'], size['width']) else: raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.") image = resize(image, size=new_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) return image def resize_annotation(self, annotation, orig_size, size, resample: PILImageResampling=PILImageResampling.NEAREST) -> Dict: return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: return normalize_annotation(annotation, image_size=image_size) def _update_annotation_for_padded_image(self, annotation: Dict, input_image_size: Tuple[int, int], output_image_size: Tuple[int, int], padding, update_bboxes) -> Dict: new_annotation = {} new_annotation['size'] = output_image_size for (key, value) in annotation.items(): if key == 'masks': masks = value masks = pad(masks, padding, mode=PaddingMode.CONSTANT, constant_values=0, input_data_format=ChannelDimension.FIRST) masks = safe_squeeze(masks, 1) new_annotation['masks'] = masks elif key == 'boxes' and update_bboxes: boxes = value boxes *= np.asarray([input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0], input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0]]) new_annotation['boxes'] = boxes elif key == 'size': new_annotation['size'] = output_image_size else: new_annotation[key] = value return new_annotation def _pad_image(self, image: np.ndarray, output_size: Tuple[int, int], annotation: Optional[Dict[str, Any]]=None, constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True) -> np.ndarray: (input_height, input_width) = get_image_size(image, channel_dim=input_data_format) (output_height, output_width) = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) if annotation is not None: annotation = self._update_annotation_for_padded_image(annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes) return (padded_image, annotation) def pad(self, images: List[np.ndarray], annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, constant_values: Union[float, Iterable[float]]=0, 
return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True, pad_size: Optional[Dict[str, int]]=None) -> BatchFeature: pad_size = pad_size if pad_size is not None else self.pad_size if pad_size is not None: padded_size = (pad_size['height'], pad_size['width']) else: padded_size = get_max_height_width(images, input_data_format=input_data_format) annotation_list = annotations if annotations is not None else [None] * len(images) padded_images = [] padded_annotations = [] for (image, annotation) in zip(images, annotation_list): (padded_image, padded_annotation) = self._pad_image(image, padded_size, annotation, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, update_bboxes=update_bboxes) padded_images.append(padded_image) padded_annotations.append(padded_annotation) data = {'pixel_values': padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images] data['pixel_mask'] = masks encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations] return encoded_inputs def preprocess(self, images: ImageInput, annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, return_segmentation_masks: bool=None, masks_path: Optional[Union[str, pathlib.Path]]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, do_convert_annotations: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_pad: Optional[bool]=None, format: Optional[Union[str, AnnotationFormat]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> BatchFeature: if 'pad_and_return_pixel_mask' in kwargs: logger.warning_once('The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, use `do_pad` instead.') do_pad = kwargs.pop('pad_and_return_pixel_mask') max_size = None if 'max_size' in kwargs: logger.warning_once("The `max_size` argument is deprecated and will be removed in a future version, use `size['longest_edge']` instead.") size = kwargs.pop('max_size') do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, max_size=max_size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_convert_annotations = self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations do_pad = self.do_pad if do_pad is None else do_pad pad_size = self.pad_size if 
pad_size is None else pad_size format = self.format if format is None else format images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample) if annotations is not None and isinstance(annotations, dict): annotations = [annotations] if annotations is not None and len(images) != len(annotations): raise ValueError(f'The number of images ({len(images)}) and annotations ({len(annotations)}) do not match.') format = AnnotationFormat(format) if annotations is not None: validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations) if masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and (not isinstance(masks_path, (pathlib.Path, str))): raise ValueError(f'The path to the directory containing the mask PNG files should be provided as a `pathlib.Path` or string object, but is {type(masks_path)} instead.') images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if annotations is not None: prepared_images = [] prepared_annotations = [] for (image, target) in zip(images, annotations): target = self.prepare_annotation(image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=input_data_format) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations if do_resize: if annotations is not None: (resized_images, resized_annotations) = ([], []) for (image, target) in zip(images, annotations): orig_size = get_image_size(image, input_data_format) resized_image = self.resize(image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format) resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image, input_data_format)) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images] if do_convert_annotations and annotations is not None: annotations = [self.normalize_annotation(annotation, get_image_size(image, input_data_format)) for (annotation, image) in zip(annotations, images)] if do_pad: encoded_inputs = self.pad(images, annotations=annotations, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format, update_bboxes=do_convert_annotations, return_tensors=return_tensors, pad_size=pad_size) else: 
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] encoded_inputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors) if annotations is not None: encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations] return encoded_inputs def post_process_object_detection(self, outputs, threshold: float=0.1, target_sizes: Union[TensorType, List[Tuple]]=None): (logits, boxes) = (outputs.logits, outputs.pred_boxes) if target_sizes is not None: if len(logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') probs = torch.max(logits, dim=-1) scores = torch.sigmoid(probs.values) labels = probs.indices boxes = center_to_corners_format(boxes) if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: (img_h, img_w) = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for (s, l, b) in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({'scores': score, 'labels': label, 'boxes': box}) return results # File: transformers-main/src/transformers/models/grounding_dino/modeling_grounding_dino.py """""" import copy import math import os import warnings from dataclasses import dataclass from pathlib import Path from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import Tensor, nn from torch.autograd import Function from torch.autograd.function import once_differentiable from ...activations import ACT2FN from ...file_utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, is_timm_available, is_torch_cuda_available, is_vision_available, replace_return_docstrings, requires_backends from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid from ...utils import is_accelerate_available, is_ninja_available, logging from ...utils.backbone_utils import load_backbone from ..auto import AutoModel from .configuration_grounding_dino import GroundingDinoConfig if is_vision_available(): from transformers.image_transforms import center_to_corners_format if is_accelerate_available(): from accelerate import PartialState from accelerate.utils import reduce if is_scipy_available(): from scipy.optimize import linear_sum_assignment if is_timm_available(): from timm import create_model logger = logging.get_logger(__name__) MultiScaleDeformableAttention = None def load_cuda_kernels(): from torch.utils.cpp_extension import load global MultiScaleDeformableAttention root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr' src_files = [root / filename for filename in ['vision.cpp', os.path.join('cpu', 'ms_deform_attn_cpu.cpp'), os.path.join('cuda', 'ms_deform_attn_cuda.cu')]] MultiScaleDeformableAttention = load('MultiScaleDeformableAttention', src_files, with_cuda=True, extra_include_paths=[str(root)], extra_cflags=['-DWITH_CUDA=1'], extra_cuda_cflags=['-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']) class MultiScaleDeformableAttentionFunction(Function): @staticmethod def forward(context, value, value_spatial_shapes, 
value_level_start_index, sampling_locations, attention_weights, im2col_step): context.im2col_step = im2col_step output = MultiScaleDeformableAttention.ms_deform_attn_forward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, context.im2col_step) context.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights) return output @staticmethod @once_differentiable def backward(context, grad_output): (value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights) = context.saved_tensors (grad_value, grad_sampling_loc, grad_attn_weight) = MultiScaleDeformableAttention.ms_deform_attn_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, context.im2col_step) return (grad_value, None, None, grad_sampling_loc, grad_attn_weight, None) logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'GroundingDinoConfig' _CHECKPOINT_FOR_DOC = 'IDEA-Research/grounding-dino-tiny' @dataclass class GroundingDinoDecoderOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class GroundingDinoEncoderOutput(ModelOutput): last_hidden_state_vision: torch.FloatTensor = None last_hidden_state_text: torch.FloatTensor = None vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None text_hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class GroundingDinoModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None init_reference_points: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None encoder_last_hidden_state_vision: Optional[torch.FloatTensor] = None encoder_last_hidden_state_text: Optional[torch.FloatTensor] = None encoder_vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_text_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None enc_outputs_class: Optional[torch.FloatTensor] = None enc_outputs_coord_logits: Optional[torch.FloatTensor] = None @dataclass class GroundingDinoObjectDetectionOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None init_reference_points: Optional[torch.FloatTensor] = None intermediate_hidden_states: Optional[torch.FloatTensor] = None intermediate_reference_points: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None encoder_last_hidden_state_vision: Optional[torch.FloatTensor] = None encoder_last_hidden_state_text: Optional[torch.FloatTensor] = None encoder_vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_text_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = 
None enc_outputs_class: Optional[torch.FloatTensor] = None enc_outputs_coord_logits: Optional[torch.FloatTensor] = None class GroundingDinoFrozenBatchNorm2d(nn.Module): def __init__(self, n): super().__init__() self.register_buffer('weight', torch.ones(n)) self.register_buffer('bias', torch.zeros(n)) self.register_buffer('running_mean', torch.zeros(n)) self.register_buffer('running_var', torch.ones(n)) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): num_batches_tracked_key = prefix + 'num_batches_tracked' if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x): weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-05 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias def replace_batch_norm(model): for (name, module) in model.named_children(): if isinstance(module, nn.BatchNorm2d): new_module = GroundingDinoFrozenBatchNorm2d(module.num_features) if not module.weight.device == torch.device('meta'): new_module.weight.data.copy_(module.weight) new_module.bias.data.copy_(module.bias) new_module.running_mean.data.copy_(module.running_mean) new_module.running_var.data.copy_(module.running_var) model._modules[name] = new_module if len(list(module.children())) > 0: replace_batch_norm(module) class GroundingDinoConvEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config if config.use_timm_backbone: requires_backends(self, ['timm']) backbone = create_model(config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, **config.backbone_kwargs) else: backbone = load_backbone(config) with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels backbone_model_type = None if config.backbone is not None: backbone_model_type = config.backbone elif config.backbone_config is not None: backbone_model_type = config.backbone_config.model_type else: raise ValueError('Either `backbone` or `backbone_config` should be provided in the config') if 'resnet' in backbone_model_type: for (name, parameter) in self.model.named_parameters(): if config.use_timm_backbone: if 'layer2' not in name and 'layer3' not in name and ('layer4' not in name): parameter.requires_grad_(False) elif 'stage.1' not in name and 'stage.2' not in name and ('stage.3' not in name): parameter.requires_grad_(False) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps out = [] for feature_map in features: mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out class GroundingDinoConvModel(nn.Module): def __init__(self, conv_encoder, position_embedding): super().__init__() self.conv_encoder = conv_encoder self.position_embedding = position_embedding def forward(self, pixel_values, pixel_mask): out = self.conv_encoder(pixel_values, pixel_mask) pos = [] for (feature_map, mask) in out: 
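            # one position embedding per (feature map, mask) pair, cast to the feature map's dtype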
pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) return (out, pos) class GroundingDinoSinePositionEmbedding(nn.Module): def __init__(self, config): super().__init__() self.embedding_dim = config.d_model // 2 self.temperature = config.positional_embedding_temperature self.scale = 2 * math.pi def forward(self, pixel_values, pixel_mask): y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) eps = 1e-06 y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.embedding_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos class GroundingDinoLearnedPositionEmbedding(nn.Module): def __init__(self, config): super().__init__() embedding_dim = config.d_model // 2 self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): (height, width) = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos def build_position_encoding(config): if config.position_embedding_type == 'sine': position_embedding = GroundingDinoSinePositionEmbedding(config) elif config.position_embedding_type == 'learned': position_embedding = GroundingDinoLearnedPositionEmbedding(config) else: raise ValueError(f'Not supported {config.position_embedding_type}') return position_embedding def multi_scale_deformable_attention(value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor) -> Tensor: (batch_size, _, num_heads, hidden_dim) = value.shape (_, num_queries, num_heads, num_levels, num_points, _) = sampling_locations.shape value_list = value.split([height.item() * width.item() for (height, width) in value_spatial_shapes], dim=1) sampling_grids = 2 * sampling_locations - 1 sampling_value_list = [] for (level_id, (height, width)) in enumerate(value_spatial_shapes): value_l_ = value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, width) sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) sampling_value_l_ = nn.functional.grid_sample(value_l_, sampling_grid_l_, mode='bilinear', padding_mode='zeros', align_corners=False) sampling_value_list.append(sampling_value_l_) attention_weights = attention_weights.transpose(1, 2).reshape(batch_size * num_heads, 1, num_queries, num_levels * num_points) output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(batch_size, num_heads * hidden_dim, num_queries) return output.transpose(1, 2).contiguous() class GroundingDinoMultiscaleDeformableAttention(nn.Module): 
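    # Multi-scale deformable attention (introduced in Deformable DETR): instead of attending over
    # every spatial position, each query predicts a small set of sampling offsets around its
    # reference point on every feature level and aggregates the bilinearly sampled values with
    # learned, softmax-normalized attention weights. A custom CUDA kernel is used when it can be
    # loaded; otherwise the pure-PyTorch `multi_scale_deformable_attention` fallback above is taken.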
def __init__(self, config: GroundingDinoConfig, num_heads: int, n_points: int): super().__init__() kernel_loaded = MultiScaleDeformableAttention is not None if is_torch_cuda_available() and is_ninja_available() and (not kernel_loaded): try: load_cuda_kernels() except Exception as e: logger.warning(f'Could not load the custom kernel for multi-scale deformable attention: {e}') if config.d_model % num_heads != 0: raise ValueError(f'embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}') dim_per_head = config.d_model // num_heads if not (dim_per_head & dim_per_head - 1 == 0 and dim_per_head != 0): warnings.warn("You'd better set embed_dim (d_model) in GroundingDinoMultiscaleDeformableAttention to make the dimension of each attention head a power of 2 which is more efficient in the authors' CUDA implementation.") self.im2col_step = 64 self.d_model = config.d_model self.n_levels = config.num_feature_levels self.n_heads = num_heads self.n_points = n_points self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2) self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points) self.value_proj = nn.Linear(config.d_model, config.d_model) self.output_proj = nn.Linear(config.d_model, config.d_model) self.disable_custom_kernels = config.disable_custom_kernels self._reset_parameters() def _reset_parameters(self): nn.init.constant_(self.sampling_offsets.weight.data, 0.0) default_dtype = torch.get_default_dtype() thetas = torch.arange(self.n_heads, dtype=torch.int64).to(default_dtype) * (2.0 * math.pi / self.n_heads) grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1) for i in range(self.n_points): grid_init[:, :, i, :] *= i + 1 with torch.no_grad(): self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) nn.init.constant_(self.attention_weights.weight.data, 0.0) nn.init.constant_(self.attention_weights.bias.data, 0.0) nn.init.xavier_uniform_(self.value_proj.weight.data) nn.init.constant_(self.value_proj.bias.data, 0.0) nn.init.xavier_uniform_(self.output_proj.weight.data) nn.init.constant_(self.output_proj.bias.data, 0.0) def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False): if position_embeddings is not None: hidden_states = self.with_pos_embed(hidden_states, position_embeddings) (batch_size, num_queries, _) = hidden_states.shape (batch_size, sequence_length, _) = encoder_hidden_states.shape if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: raise ValueError('Make sure to align the spatial shapes with the sequence length of the encoder hidden states') value = self.value_proj(encoder_hidden_states) if attention_mask is not None: value = value.masked_fill(~attention_mask[..., None], float(0)) value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) sampling_offsets = self.sampling_offsets(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2) attention_weights = 
self.attention_weights(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels * self.n_points) attention_weights = F.softmax(attention_weights, -1).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points) num_coordinates = reference_points.shape[-1] if num_coordinates == 2: offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) sampling_locations = reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :] elif num_coordinates == 4: sampling_locations = reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 else: raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}') if self.disable_custom_kernels: output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) else: try: output = MultiScaleDeformableAttentionFunction.apply(value, spatial_shapes, level_start_index, sampling_locations, attention_weights, self.im2col_step) except Exception: output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) output = self.output_proj(output) return (output, attention_weights) class GroundingDinoTextEnhancerLayer(nn.Module): def __init__(self, config): super().__init__() self.self_attn = GroundingDinoMultiheadAttention(config, num_attention_heads=config.encoder_attention_heads // 2) self.fc1 = nn.Linear(config.d_model, config.encoder_ffn_dim // 2) self.fc2 = nn.Linear(config.encoder_ffn_dim // 2, config.d_model) self.layer_norm_before = nn.LayerNorm(config.d_model, config.layer_norm_eps) self.layer_norm_after = nn.LayerNorm(config.d_model, config.layer_norm_eps) self.activation = ACT2FN[config.activation_function] self.num_heads = config.encoder_attention_heads // 2 self.dropout = config.text_enhancer_dropout def with_pos_embed(self, hidden_state: Tensor, position_embeddings: Optional[Tensor]): return hidden_state if position_embeddings is None else hidden_state + position_embeddings def forward(self, hidden_states: torch.FloatTensor, attention_masks: Optional[torch.BoolTensor]=None, position_embeddings: Optional[torch.FloatTensor]=None) -> Tuple[torch.FloatTensor, torch.FloatTensor]: if attention_masks.dim() == 3 and attention_masks.shape[0] == hidden_states.shape[0]: attention_masks = attention_masks[:, None, :, :] attention_masks = attention_masks.repeat(1, self.num_heads, 1, 1) dtype = hidden_states.dtype attention_masks = attention_masks.to(dtype=dtype) attention_masks = (1.0 - attention_masks) * torch.finfo(dtype).min queries = keys = self.with_pos_embed(hidden_states, position_embeddings) (attention_output, attention_weights) = self.self_attn(queries=queries, keys=keys, values=hidden_states, attention_mask=attention_masks, output_attentions=True) attention_output = nn.functional.dropout(attention_output, p=self.dropout, training=self.training) hidden_states = hidden_states + attention_output hidden_states = self.layer_norm_before(hidden_states) residual = hidden_states hidden_states = self.activation(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = hidden_states + residual hidden_states = self.layer_norm_after(hidden_states) return (hidden_states, attention_weights) 
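# Illustrative sketch (hypothetical helper, arbitrary example sizes): it only exercises the
# tensor-shape contract of the pure-PyTorch `multi_scale_deformable_attention` fallback defined
# earlier in this file: value is (batch, sum(H*W), heads, head_dim), sampling locations are
# normalized to [0, 1], and the output is (batch, num_queries, heads * head_dim).
def _example_multi_scale_deformable_attention_shapes():
    batch_size, num_heads, head_dim = 2, 8, 32
    num_queries, num_levels, num_points = 100, 2, 4
    # two feature levels: 32x32 and 16x16
    spatial_shapes = torch.as_tensor([[32, 32], [16, 16]], dtype=torch.long)
    sequence_length = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())
    value = torch.rand(batch_size, sequence_length, num_heads, head_dim)
    # sampling locations are expressed in normalized [0, 1] coordinates per feature level
    sampling_locations = torch.rand(batch_size, num_queries, num_heads, num_levels, num_points, 2)
    attention_weights = torch.rand(batch_size, num_queries, num_heads, num_levels, num_points)
    # mimic the softmax normalization over the (levels x points) axes used by the module
    attention_weights = attention_weights / attention_weights.sum(dim=(-2, -1), keepdim=True)
    output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights)
    assert output.shape == (batch_size, num_queries, num_heads * head_dim)
    return output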
class GroundingDinoBiMultiHeadAttention(nn.Module): def __init__(self, config): super().__init__() vision_dim = text_dim = config.d_model embed_dim = config.encoder_ffn_dim // 2 num_heads = config.encoder_attention_heads // 2 dropout = config.fusion_dropout self.embed_dim = embed_dim self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.vision_dim = vision_dim self.text_dim = text_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'`embed_dim` must be divisible by `num_heads` (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = dropout self.vision_proj = nn.Linear(self.vision_dim, self.embed_dim) self.text_proj = nn.Linear(self.text_dim, self.embed_dim) self.values_vision_proj = nn.Linear(self.vision_dim, self.embed_dim) self.values_text_proj = nn.Linear(self.text_dim, self.embed_dim) self.out_vision_proj = nn.Linear(self.embed_dim, self.vision_dim) self.out_text_proj = nn.Linear(self.embed_dim, self.text_dim) def _reshape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, vision_features: torch.FloatTensor, text_features: torch.FloatTensor, vision_attention_mask: Optional[torch.BoolTensor]=None, text_attention_mask: Optional[torch.BoolTensor]=None) -> Tuple[Tuple[torch.FloatTensor, torch.FloatTensor], Tuple[torch.FloatTensor, torch.FloatTensor]]: (batch_size, tgt_len, _) = vision_features.size() vision_query_states = self.vision_proj(vision_features) * self.scale vision_query_states = self._reshape(vision_query_states, tgt_len, batch_size) text_key_states = self.text_proj(text_features) text_key_states = self._reshape(text_key_states, -1, batch_size) vision_value_states = self.values_vision_proj(vision_features) vision_value_states = self._reshape(vision_value_states, -1, batch_size) text_value_states = self.values_text_proj(text_features) text_value_states = self._reshape(text_value_states, -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) vision_query_states = vision_query_states.view(*proj_shape) text_key_states = text_key_states.view(*proj_shape) vision_value_states = vision_value_states.view(*proj_shape) text_value_states = text_value_states.view(*proj_shape) src_len = text_key_states.size(1) attn_weights = torch.bmm(vision_query_states, text_key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(batch_size * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') attn_weights = attn_weights - attn_weights.max() attn_weights = torch.clamp(attn_weights, min=-50000, max=50000) attn_weights_transposed = attn_weights.transpose(1, 2) text_attn_weights = attn_weights_transposed - torch.max(attn_weights_transposed, dim=-1, keepdim=True)[0] text_attn_weights = torch.clamp(text_attn_weights, min=-50000, max=50000) if vision_attention_mask is not None: vision_attention_mask = vision_attention_mask[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) text_attn_weights.masked_fill_(vision_attention_mask, float('-inf')) text_attn_weights = text_attn_weights.softmax(dim=-1) if text_attention_mask is not None: text_attention_mask = text_attention_mask[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) attn_weights.masked_fill_(text_attention_mask, float('-inf')) vision_attn_weights = 
attn_weights.softmax(dim=-1) vision_attn_probs = F.dropout(vision_attn_weights, p=self.dropout, training=self.training) text_attn_probs = F.dropout(text_attn_weights, p=self.dropout, training=self.training) vision_attn_output = torch.bmm(vision_attn_probs, text_value_states) text_attn_output = torch.bmm(text_attn_probs, vision_value_states) if vision_attn_output.size() != (batch_size * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`vision_attn_output` should be of size {(batch_size, self.num_heads, tgt_len, self.head_dim)}, but is {vision_attn_output.size()}') if text_attn_output.size() != (batch_size * self.num_heads, src_len, self.head_dim): raise ValueError(f'`text_attn_output` should be of size {(batch_size, self.num_heads, src_len, self.head_dim)}, but is {text_attn_output.size()}') vision_attn_output = vision_attn_output.view(batch_size, self.num_heads, tgt_len, self.head_dim) vision_attn_output = vision_attn_output.transpose(1, 2) vision_attn_output = vision_attn_output.reshape(batch_size, tgt_len, self.embed_dim) text_attn_output = text_attn_output.view(batch_size, self.num_heads, src_len, self.head_dim) text_attn_output = text_attn_output.transpose(1, 2) text_attn_output = text_attn_output.reshape(batch_size, src_len, self.embed_dim) vision_attn_output = self.out_vision_proj(vision_attn_output) text_attn_output = self.out_text_proj(text_attn_output) return ((vision_attn_output, vision_attn_weights), (text_attn_output, text_attn_weights)) def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor: if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() output = input.div(keep_prob) * random_tensor return output class GroundingDinoDropPath(nn.Module): def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class GroundingDinoFusionLayer(nn.Module): def __init__(self, config): super().__init__() drop_path = config.fusion_droppath self.layer_norm_vision = nn.LayerNorm(config.d_model, config.layer_norm_eps) self.layer_norm_text = nn.LayerNorm(config.d_model, config.layer_norm_eps) self.attn = GroundingDinoBiMultiHeadAttention(config) self.drop_path = GroundingDinoDropPath(drop_path) if drop_path > 0.0 else nn.Identity() init_values = 0.0001 self.vision_param = nn.Parameter(init_values * torch.ones(config.d_model), requires_grad=True) self.text_param = nn.Parameter(init_values * torch.ones(config.d_model), requires_grad=True) def forward(self, vision_features: torch.FloatTensor, text_features: torch.FloatTensor, attention_mask_vision: Optional[torch.BoolTensor]=None, attention_mask_text: Optional[torch.BoolTensor]=None) -> Tuple[Tuple[torch.FloatTensor, torch.FloatTensor], Tuple[torch.FloatTensor, torch.FloatTensor]]: vision_features = self.layer_norm_vision(vision_features) text_features = self.layer_norm_text(text_features) ((delta_v, vision_attn), (delta_t, text_attn)) = self.attn(vision_features, text_features, vision_attention_mask=attention_mask_vision, text_attention_mask=attention_mask_text) vision_features = vision_features + self.drop_path(self.vision_param * delta_v) text_features = text_features + 
self.drop_path(self.text_param * delta_t) return ((vision_features, vision_attn), (text_features, text_attn)) class GroundingDinoDeformableLayer(nn.Module): def __init__(self, config: GroundingDinoConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = GroundingDinoMultiscaleDeformableAttention(config, num_heads=config.encoder_attention_heads, n_points=config.encoder_n_points) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False): residual = hidden_states (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) return (hidden_states, attn_weights) def get_sine_pos_embed(pos_tensor: torch.Tensor, num_pos_feats: int=128, temperature: int=10000, exchange_xy: bool=True) -> Tensor: scale = 2 * math.pi dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device) dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats) def sine_func(x: torch.Tensor): sin_x = x * scale / dim_t sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2) return sin_x pos_tensor = pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1) position_embeddings = [sine_func(x) for x in pos_tensor] if exchange_xy: (position_embeddings[0], position_embeddings[1]) = (position_embeddings[1], position_embeddings[0]) position_embeddings = torch.cat(position_embeddings, dim=-1) return position_embeddings class GroundingDinoEncoderLayer(nn.Module): def __init__(self, config) -> None: super().__init__() self.d_model = config.d_model self.text_enhancer_layer = GroundingDinoTextEnhancerLayer(config) self.fusion_layer = GroundingDinoFusionLayer(config) self.deformable_layer = GroundingDinoDeformableLayer(config) def get_text_position_embeddings(self, text_features: Tensor, text_position_embedding: Optional[torch.Tensor], text_position_ids: 
Optional[torch.Tensor]) -> Tensor: (batch_size, seq_length, _) = text_features.shape if text_position_embedding is None and text_position_ids is None: text_position_embedding = torch.arange(seq_length, device=text_features.device) text_position_embedding = text_position_embedding.float() text_position_embedding = text_position_embedding.unsqueeze(0).unsqueeze(-1) text_position_embedding = text_position_embedding.repeat(batch_size, 1, 1) text_position_embedding = get_sine_pos_embed(text_position_embedding, num_pos_feats=self.d_model, exchange_xy=False) if text_position_ids is not None: text_position_embedding = get_sine_pos_embed(text_position_ids[..., None], num_pos_feats=self.d_model, exchange_xy=False) return text_position_embedding def forward(self, vision_features: Tensor, vision_position_embedding: Tensor, spatial_shapes: Tensor, level_start_index: Tensor, key_padding_mask: Tensor, reference_points: Tensor, text_features: Optional[Tensor]=None, text_attention_mask: Optional[Tensor]=None, text_position_embedding: Optional[Tensor]=None, text_self_attention_masks: Optional[Tensor]=None, text_position_ids: Optional[Tensor]=None): text_position_embedding = self.get_text_position_embeddings(text_features, text_position_embedding, text_position_ids) ((vision_features, vision_fused_attn), (text_features, text_fused_attn)) = self.fusion_layer(vision_features=vision_features, text_features=text_features, attention_mask_vision=key_padding_mask, attention_mask_text=text_attention_mask) (text_features, text_enhanced_attn) = self.text_enhancer_layer(hidden_states=text_features, attention_masks=~text_self_attention_masks, position_embeddings=text_position_embedding if text_position_embedding is not None else None) (vision_features, vision_deformable_attn) = self.deformable_layer(hidden_states=vision_features, attention_mask=~key_padding_mask, position_embeddings=vision_position_embedding, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index) return ((vision_features, text_features), (vision_fused_attn, text_fused_attn, text_enhanced_attn, vision_deformable_attn)) class GroundingDinoMultiheadAttention(nn.Module): def __init__(self, config, num_attention_heads=None): super().__init__() if config.hidden_size % num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({num_attention_heads})') self.num_attention_heads = num_attention_heads self.attention_head_size = int(config.hidden_size / num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.out_proj = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.attention_dropout) def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, queries: torch.Tensor, keys: torch.Tensor, values: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: query_layer = self.transpose_for_scores(self.query(queries)) key_layer = self.transpose_for_scores(self.key(keys)) value_layer = 
self.transpose_for_scores(self.value(values)) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) context_layer = self.out_proj(context_layer) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class GroundingDinoDecoderLayer(nn.Module): def __init__(self, config: GroundingDinoConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = GroundingDinoMultiheadAttention(config, num_attention_heads=config.decoder_attention_heads) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) self.encoder_attn_text = GroundingDinoMultiheadAttention(config, num_attention_heads=config.decoder_attention_heads) self.encoder_attn_text_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) self.encoder_attn = GroundingDinoMultiscaleDeformableAttention(config, num_heads=config.decoder_attention_heads, n_points=config.decoder_n_points) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps) def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, vision_encoder_hidden_states: Optional[torch.Tensor]=None, vision_encoder_attention_mask: Optional[torch.Tensor]=None, text_encoder_hidden_states: Optional[torch.Tensor]=None, text_encoder_attention_mask: Optional[torch.Tensor]=None, self_attn_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False): residual = hidden_states queries = keys = self.with_pos_embed(hidden_states, position_embeddings) (hidden_states, self_attn_weights) = self.self_attn(queries=queries, keys=keys, values=hidden_states, attention_mask=self_attn_mask, output_attentions=True) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) second_residual = hidden_states queries = self.with_pos_embed(hidden_states, position_embeddings) (hidden_states, text_cross_attn_weights) = self.encoder_attn_text(queries=queries, keys=text_encoder_hidden_states, values=text_encoder_hidden_states, attention_mask=text_encoder_attention_mask, output_attentions=True) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = second_residual + hidden_states hidden_states = self.encoder_attn_text_layer_norm(hidden_states) third_residual = hidden_states 
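        # Third residual sub-block: deformable cross-attention over the flattened multi-scale vision
        # features (the self-attention and text cross-attention sub-blocks were applied above).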
cross_attn_weights = None (hidden_states, cross_attn_weights) = self.encoder_attn(hidden_states=hidden_states, attention_mask=vision_encoder_attention_mask, encoder_hidden_states=vision_encoder_hidden_states, encoder_attention_mask=vision_encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = third_residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, text_cross_attn_weights, cross_attn_weights) return outputs class GroundingDinoContrastiveEmbedding(nn.Module): def __init__(self, config): super().__init__() self.max_text_len = config.max_text_len def forward(self, vision_hidden_state: torch.FloatTensor, text_hidden_state: torch.FloatTensor, text_token_mask: torch.BoolTensor) -> torch.FloatTensor: output = vision_hidden_state @ text_hidden_state.transpose(-1, -2) output = output.masked_fill(~text_token_mask[:, None, :], float('-inf')) new_output = torch.full((*output.shape[:-1], self.max_text_len), float('-inf'), device=output.device) new_output[..., :output.shape[-1]] = output return new_output class GroundingDinoPreTrainedModel(PreTrainedModel): config_class = GroundingDinoConfig base_model_prefix = 'model' main_input_name = 'pixel_values' def _init_weights(self, module): std = self.config.init_std if isinstance(module, GroundingDinoLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) elif isinstance(module, GroundingDinoMultiscaleDeformableAttention): module._reset_parameters() elif isinstance(module, GroundingDinoBiMultiHeadAttention): nn.init.xavier_uniform_(module.vision_proj.weight) module.vision_proj.bias.data.fill_(0) nn.init.xavier_uniform_(module.text_proj.weight) module.text_proj.bias.data.fill_(0) nn.init.xavier_uniform_(module.values_vision_proj.weight) module.values_vision_proj.bias.data.fill_(0) nn.init.xavier_uniform_(module.values_text_proj.weight) module.values_text_proj.bias.data.fill_(0) nn.init.xavier_uniform_(module.out_vision_proj.weight) module.out_vision_proj.bias.data.fill_(0) nn.init.xavier_uniform_(module.out_text_proj.weight) module.out_text_proj.bias.data.fill_(0) elif isinstance(module, (GroundingDinoEncoderLayer, GroundingDinoDecoderLayer)): for p in module.parameters(): if p.dim() > 1: nn.init.normal_(p, mean=0.0, std=std) elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, GroundingDinoMLPPredictionHead): nn.init.constant_(module.layers[-1].weight.data, 0) nn.init.constant_(module.layers[-1].bias.data, 0) if 
hasattr(module, 'reference_points') and (not self.config.two_stage): nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0) nn.init.constant_(module.reference_points.bias.data, 0.0) if hasattr(module, 'level_embed'): nn.init.normal_(module.level_embed) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, GroundingDinoDecoder): module.gradient_checkpointing = value GROUNDING_DINO_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`GroundingDinoConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GROUNDING_DINO_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it.\n\n Pixel values can be obtained using [`AutoImageProcessor`]. See [`GroundingDinoImageProcessor.__call__`] for\n details.\n\n input_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`BertTokenizer.__call__`] for details.\n\n token_type_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`: 0 corresponds to a `sentence A` token, 1 corresponds to a `sentence B` token\n\n [What are token type IDs?](../glossary#token-type-ids)\n\n attention_mask (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are real (i.e. **not masked**),\n - 0 for tokens that are padding (i.e. **masked**).\n\n [What are attention masks?](../glossary#attention-mask)\n\n pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):\n Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n\n [What are attention masks?](../glossary#attention-mask)\n\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of (`last_hidden_state_vision`, *optional*: `last_hidden_state_text`, *optional*:\n `vision_hidden_states`, *optional*: `text_hidden_states`, *optional*: `attentions`)\n `last_hidden_state_vision` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence\n of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the\n decoder.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n' class GroundingDinoEncoder(GroundingDinoPreTrainedModel): def __init__(self, config: GroundingDinoConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([GroundingDinoEncoderLayer(config) for _ in range(config.encoder_layers)]) self.post_init() @staticmethod def get_reference_points(spatial_shapes, valid_ratios, device): reference_points_list = [] for (level, (height, width)) in enumerate(spatial_shapes): (ref_y, ref_x) = meshgrid(torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), indexing='ij') ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height) ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points def forward(self, vision_features: Tensor, vision_attention_mask: Tensor, vision_position_embedding: Tensor, spatial_shapes: Tensor, level_start_index: Tensor, valid_ratios=None, text_features: Optional[Tensor]=None, text_attention_mask: Optional[Tensor]=None, text_position_embedding: Optional[Tensor]=None, text_self_attention_masks: Optional[Tensor]=None, text_position_ids: Optional[Tensor]=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=vision_features.device) encoder_vision_states = () if output_hidden_states else None encoder_text_states = () if output_hidden_states else None all_attns = () if output_attentions else None all_attn_fused_text = () if output_attentions else None all_attn_fused_vision = () if output_attentions else None all_attn_enhanced_text = () if output_attentions else None all_attn_deformable = () if output_attentions else None for (i, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_vision_states += (vision_features,) encoder_text_states += (text_features,) ((vision_features, text_features), attentions) = encoder_layer(vision_features=vision_features, vision_position_embedding=vision_position_embedding, spatial_shapes=spatial_shapes, level_start_index=level_start_index, key_padding_mask=vision_attention_mask, reference_points=reference_points, text_features=text_features, text_attention_mask=text_attention_mask, text_position_embedding=text_position_embedding, text_self_attention_masks=text_self_attention_masks, text_position_ids=text_position_ids) if output_attentions: all_attn_fused_vision += (attentions[0],) all_attn_fused_text += (attentions[1],) all_attn_enhanced_text += (attentions[2],) all_attn_deformable += (attentions[3],) if output_hidden_states: encoder_vision_states += 
(vision_features,) encoder_text_states += (text_features,) if output_attentions: all_attns = (all_attn_fused_vision, all_attn_fused_text, all_attn_enhanced_text, all_attn_deformable) if not return_dict: enc_outputs = [vision_features, text_features, encoder_vision_states, encoder_text_states, all_attns] return tuple((v for v in enc_outputs if v is not None)) return GroundingDinoEncoderOutput(last_hidden_state_vision=vision_features, last_hidden_state_text=text_features, vision_hidden_states=encoder_vision_states, text_hidden_states=encoder_text_states, attentions=all_attns) class GroundingDinoDecoder(GroundingDinoPreTrainedModel): def __init__(self, config: GroundingDinoConfig): super().__init__(config) self.dropout = config.dropout self.layer_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps) self.layers = nn.ModuleList([GroundingDinoDecoderLayer(config) for _ in range(config.decoder_layers)]) self.reference_points_head = GroundingDinoMLPPredictionHead(config.query_dim // 2 * config.d_model, config.d_model, config.d_model, 2) self.gradient_checkpointing = False self.bbox_embed = None self.class_embed = None self.query_scale = None self.post_init() def forward(self, inputs_embeds, vision_encoder_hidden_states, vision_encoder_attention_mask=None, text_encoder_hidden_states=None, text_encoder_attention_mask=None, reference_points=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, self_attn_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_attns = () if output_attentions else None all_cross_attns_vision = () if output_attentions and vision_encoder_hidden_states is not None else None all_cross_attns_text = () if output_attentions and text_encoder_hidden_states is not None else None intermediate = () intermediate_reference_points = () if text_encoder_attention_mask is not None: dtype = text_encoder_hidden_states.dtype text_encoder_attention_mask = text_encoder_attention_mask[:, None, None, :] text_encoder_attention_mask = text_encoder_attention_mask.repeat(1, self.config.decoder_attention_heads, self.config.num_queries, 1) text_encoder_attention_mask = text_encoder_attention_mask.to(dtype=dtype) text_encoder_attention_mask = text_encoder_attention_mask * torch.finfo(dtype).min for (idx, decoder_layer) in enumerate(self.layers): num_coordinates = reference_points.shape[-1] if num_coordinates == 4: reference_points_input = reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None] elif num_coordinates == 2: reference_points_input = reference_points[:, :, None] * valid_ratios[:, None] else: raise ValueError('Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}') query_pos = get_sine_pos_embed(reference_points_input[:, :, 0, :], num_pos_feats=self.config.d_model // 2) query_pos = self.reference_points_head(query_pos) if output_hidden_states: all_hidden_states += (self.layer_norm(hidden_states),) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return 
module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint(create_custom_forward(decoder_layer), hidden_states, query_pos, reference_points_input, spatial_shapes, level_start_index, vision_encoder_hidden_states, vision_encoder_attention_mask, text_encoder_hidden_states, text_encoder_attention_mask, self_attn_mask, None) else: layer_outputs = decoder_layer(hidden_states=hidden_states, position_embeddings=query_pos, reference_points=reference_points_input, spatial_shapes=spatial_shapes, level_start_index=level_start_index, vision_encoder_hidden_states=vision_encoder_hidden_states, vision_encoder_attention_mask=vision_encoder_attention_mask, text_encoder_hidden_states=text_encoder_hidden_states, text_encoder_attention_mask=text_encoder_attention_mask, self_attn_mask=self_attn_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if self.bbox_embed is not None: tmp = self.bbox_embed[idx](hidden_states) num_coordinates = reference_points.shape[-1] if num_coordinates == 4: new_reference_points = tmp + torch.special.logit(reference_points, eps=1e-05) new_reference_points = new_reference_points.sigmoid() elif num_coordinates == 2: new_reference_points = tmp new_reference_points[..., :2] = tmp[..., :2] + torch.special.logit(reference_points, eps=1e-05) new_reference_points = new_reference_points.sigmoid() else: raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}') reference_points = new_reference_points.detach() intermediate += (self.layer_norm(hidden_states),) intermediate_reference_points += (reference_points,) if output_attentions: all_self_attns += (layer_outputs[1],) if text_encoder_hidden_states is not None: all_cross_attns_text += (layer_outputs[2],) if vision_encoder_hidden_states is not None: all_cross_attns_vision += (layer_outputs[3],) intermediate = torch.stack(intermediate, dim=1) intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if output_attentions: all_attns += (all_self_attns, all_cross_attns_text, all_cross_attns_vision) if not return_dict: return tuple((v for v in [hidden_states, intermediate, intermediate_reference_points, all_hidden_states, all_attns] if v is not None)) return GroundingDinoDecoderOutput(last_hidden_state=hidden_states, intermediate_hidden_states=intermediate, intermediate_reference_points=intermediate_reference_points, hidden_states=all_hidden_states, attentions=all_attns) SPECIAL_TOKENS = [101, 102, 1012, 1029] def generate_masks_with_special_tokens_and_transfer_map(input_ids: torch.LongTensor) -> Tuple[Tensor, Tensor]: (batch_size, num_token) = input_ids.shape special_tokens_mask = torch.zeros((batch_size, num_token), device=input_ids.device).bool() for special_token in SPECIAL_TOKENS: special_tokens_mask |= input_ids == special_token idxs = torch.nonzero(special_tokens_mask) attention_mask = torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(batch_size, 1, 1) position_ids = torch.zeros((batch_size, num_token), device=input_ids.device) previous_col = 0 for i in range(idxs.shape[0]): (row, col) = idxs[i] if col == 0 or col == num_token - 1: attention_mask[row, col, col] = True position_ids[row, col] = 0 else: attention_mask[row, previous_col + 1:col + 1, previous_col + 1:col + 1] = True position_ids[row, previous_col + 1:col + 1] = torch.arange(0, col - previous_col, 
device=input_ids.device) previous_col = col return (attention_mask, position_ids.to(torch.long)) @add_start_docstrings('\n The bare Grounding DINO Model (consisting of a backbone and encoder-decoder Transformer) outputting raw\n hidden-states without any specific head on top.\n ', GROUNDING_DINO_START_DOCSTRING) class GroundingDinoModel(GroundingDinoPreTrainedModel): def __init__(self, config: GroundingDinoConfig): super().__init__(config) backbone = GroundingDinoConvEncoder(config) position_embeddings = build_position_encoding(config) self.backbone = GroundingDinoConvModel(backbone, position_embeddings) if config.num_feature_levels > 1: num_backbone_outs = len(backbone.intermediate_channel_sizes) input_proj_list = [] for i in range(num_backbone_outs): in_channels = backbone.intermediate_channel_sizes[i] input_proj_list.append(nn.Sequential(nn.Conv2d(in_channels, config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model))) for _ in range(config.num_feature_levels - num_backbone_outs): input_proj_list.append(nn.Sequential(nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, config.d_model))) in_channels = config.d_model self.input_proj_vision = nn.ModuleList(input_proj_list) else: self.input_proj_vision = nn.ModuleList([nn.Sequential(nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model))]) self.text_backbone = AutoModel.from_config(config.text_config, add_pooling_layer=False, attn_implementation=config._attn_implementation) self.text_projection = nn.Linear(config.text_config.hidden_size, config.d_model) if config.embedding_init_target or not config.two_stage: self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model) self.encoder = GroundingDinoEncoder(config) self.decoder = GroundingDinoDecoder(config) self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model)) if config.two_stage: self.enc_output = nn.Linear(config.d_model, config.d_model) self.enc_output_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps) if config.two_stage_bbox_embed_share and config.decoder_bbox_embed_share and (self.decoder.bbox_embed is not None): self.encoder_output_bbox_embed = self.decoder.bbox_embed else: self.encoder_output_bbox_embed = GroundingDinoMLPPredictionHead(input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3) self.encoder_output_class_embed = GroundingDinoContrastiveEmbedding(config) else: self.reference_points = nn.Embedding(config.num_queries, 4) self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_backbone(self): for (name, param) in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for (name, param) in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) def get_valid_ratio(self, mask): (_, height, width) = mask.shape valid_height = torch.sum(mask[:, :, 0], 1) valid_width = torch.sum(mask[:, 0, :], 1) valid_ratio_heigth = valid_height.float() / height valid_ratio_width = valid_width.float() / width valid_ratio = torch.stack([valid_ratio_width, valid_ratio_heigth], -1) return valid_ratio def generate_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes): batch_size = enc_output.shape[0] proposals = [] current_position = 0 for (level, (height, width)) in enumerate(spatial_shapes): mask_flatten_ = padding_mask[:, 
current_position:current_position + height * width] mask_flatten_ = mask_flatten_.view(batch_size, height, width, 1) valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1) valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1) (grid_y, grid_x) = meshgrid(torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), indexing='ij') grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2) grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale width_heigth = torch.ones_like(grid) * 0.05 * 2.0 ** level proposal = torch.cat((grid, width_heigth), -1).view(batch_size, -1, 4) proposals.append(proposal) current_position += height * width output_proposals = torch.cat(proposals, 1) output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True) output_proposals = torch.log(output_proposals / (1 - output_proposals)) output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float('inf')) output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf')) object_query = enc_output object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0)) object_query = object_query.masked_fill(~output_proposals_valid, float(0)) object_query = self.enc_output_norm(self.enc_output(object_query)) return (object_query, output_proposals) @add_start_docstrings_to_model_forward(GROUNDING_DINO_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=GroundingDinoModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: Tensor, input_ids: Tensor, token_type_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, pixel_mask: Optional[Tensor]=None, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict (text_self_attention_masks, position_ids) = generate_masks_with_special_tokens_and_transfer_map(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) text_token_mask = attention_mask.bool() max_text_len = self.config.max_text_len if text_self_attention_masks.shape[1] > max_text_len: text_self_attention_masks = text_self_attention_masks[:, :max_text_len, :max_text_len] position_ids = position_ids[:, :max_text_len] input_ids = input_ids[:, :max_text_len] token_type_ids = token_type_ids[:, :max_text_len] text_token_mask = text_token_mask[:, :max_text_len] text_outputs = self.text_backbone(input_ids, text_self_attention_masks, token_type_ids, position_ids, return_dict=return_dict) text_features = text_outputs.last_hidden_state if return_dict else text_outputs[0] text_features = self.text_projection(text_features) (batch_size, num_channels, height, width) = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), dtype=torch.long, device=device) (vision_features, position_embeddings_list) = self.backbone(pixel_values, pixel_mask) feature_maps = [] masks = [] for (level, 
(source, mask)) in enumerate(vision_features): feature_maps.append(self.input_proj_vision[level](source)) masks.append(mask) if self.config.num_feature_levels > len(feature_maps): _len_sources = len(feature_maps) for level in range(_len_sources, self.config.num_feature_levels): if level == _len_sources: source = self.input_proj_vision[level](vision_features[-1][0]) else: source = self.input_proj_vision[level](feature_maps[-1]) mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone.position_embedding(source, mask).to(source.dtype) feature_maps.append(source) masks.append(mask) position_embeddings_list.append(pos_l) query_embeds = None if self.config.embedding_init_target or self.config.two_stage: query_embeds = self.query_position_embeddings.weight source_flatten = [] mask_flatten = [] lvl_pos_embed_flatten = [] spatial_shapes = [] for (level, (source, mask, pos_embed)) in enumerate(zip(feature_maps, masks, position_embeddings_list)): (batch_size, num_channels, height, width) = source.shape spatial_shape = (height, width) spatial_shapes.append(spatial_shape) source = source.flatten(2).transpose(1, 2) mask = mask.flatten(1) pos_embed = pos_embed.flatten(2).transpose(1, 2) lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1) lvl_pos_embed_flatten.append(lvl_pos_embed) source_flatten.append(source) mask_flatten.append(mask) source_flatten = torch.cat(source_flatten, 1) mask_flatten = torch.cat(mask_flatten, 1) lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device) level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1) valid_ratios = valid_ratios.float() if encoder_outputs is None: encoder_outputs = self.encoder(vision_features=source_flatten, vision_attention_mask=~mask_flatten, vision_position_embedding=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, text_features=text_features, text_attention_mask=~text_token_mask, text_position_embedding=None, text_self_attention_masks=~text_self_attention_masks, text_position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, GroundingDinoEncoderOutput)): encoder_outputs = GroundingDinoEncoderOutput(last_hidden_state_vision=encoder_outputs[0], last_hidden_state_text=encoder_outputs[1], vision_hidden_states=encoder_outputs[2] if output_hidden_states else None, text_hidden_states=encoder_outputs[3] if output_hidden_states else None, attentions=encoder_outputs[-1] if output_attentions else None) enc_outputs_class = None enc_outputs_coord_logits = None if self.config.two_stage: (object_query_embedding, output_proposals) = self.generate_encoder_output_proposals(encoder_outputs[0], ~mask_flatten, spatial_shapes) enc_outputs_class = self.encoder_output_class_embed(object_query_embedding, encoder_outputs[1], text_token_mask) delta_bbox = self.encoder_output_bbox_embed(object_query_embedding) enc_outputs_coord_logits = delta_bbox + output_proposals topk = self.config.num_queries topk_logits = enc_outputs_class.max(-1)[0] topk_proposals = torch.topk(topk_logits, topk, dim=1)[1] topk_coords_logits = torch.gather(enc_outputs_coord_logits, 1, 
topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) topk_coords_logits = topk_coords_logits.detach() reference_points = topk_coords_logits.sigmoid() init_reference_points = reference_points if query_embeds is not None: target = query_embeds.unsqueeze(0).repeat(batch_size, 1, 1) else: target = torch.gather(object_query_embedding, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)).detach() else: target = query_embeds.unsqueeze(0).repeat(batch_size, 1, 1) reference_points = self.reference_points.weight.unsqueeze(0).repeat(batch_size, 1, 1).sigmoid() init_reference_points = reference_points decoder_outputs = self.decoder(inputs_embeds=target, vision_encoder_hidden_states=encoder_outputs[0], vision_encoder_attention_mask=mask_flatten, text_encoder_hidden_states=encoder_outputs[1], text_encoder_attention_mask=~text_token_mask, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, self_attn_mask=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: enc_outputs = tuple((value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None)) tuple_outputs = (decoder_outputs[0], init_reference_points) + decoder_outputs[1:] + encoder_outputs + enc_outputs return tuple_outputs return GroundingDinoModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, init_reference_points=init_reference_points, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, intermediate_reference_points=decoder_outputs.intermediate_reference_points, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_last_hidden_state_vision=encoder_outputs.last_hidden_state_vision, encoder_last_hidden_state_text=encoder_outputs.last_hidden_state_text, encoder_vision_hidden_states=encoder_outputs.vision_hidden_states, encoder_text_hidden_states=encoder_outputs.text_hidden_states, encoder_attentions=encoder_outputs.attentions, enc_outputs_class=enc_outputs_class, enc_outputs_coord_logits=enc_outputs_coord_logits) class GroundingDinoMLPPredictionHead(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList((nn.Linear(n, k) for (n, k) in zip([input_dim] + h, h + [output_dim]))) def forward(self, x): for (i, layer) in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x def _upcast(t: Tensor) -> Tensor: if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() def box_area(boxes: Tensor) -> Tensor: boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (right_bottom - left_top).clamp(min=0) inter = width_height[:, :, 0] * width_height[:, :, 1] union = area1[:, None] + area2 - inter iou = inter / union return (iou, union) def generalized_box_iou(boxes1, boxes2): if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f'boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}') if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f'boxes2 
must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}') (iou, union) = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area def _max_by_axis(the_list): maxes = the_list[0] for sublist in the_list[1:]: for (index, item) in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes def dice_loss(inputs, targets, num_boxes): inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float=0.25, gamma: float=2): prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction='none') p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * (1 - p_t) ** gamma if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class NestedTensor: def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return (self.tensors, self.mask) def __repr__(self): return str(self.tensors) def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): if tensor_list[0].ndim == 3: max_size = _max_by_axis([list(img.shape) for img in tensor_list]) batch_shape = [len(tensor_list)] + max_size (batch_size, num_channels, height, width) = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) for (img, pad_img, m) in zip(tensor_list, tensor, mask): pad_img[:img.shape[0], :img.shape[1], :img.shape[2]].copy_(img) m[:img.shape[1], :img.shape[2]] = False else: raise ValueError('Only 3-dimensional tensors are supported') return NestedTensor(tensor, mask) class GroundingDinoHungarianMatcher(nn.Module): def __init__(self, class_cost: float=1, bbox_cost: float=1, giou_cost: float=1): super().__init__() requires_backends(self, ['scipy']) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and (giou_cost == 0): raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): (batch_size, num_queries) = outputs['logits'].shape[:2] out_prob = outputs['logits'].flatten(0, 1).sigmoid() out_bbox = outputs['pred_boxes'].flatten(0, 1) target_ids = torch.cat([v['class_labels'] for v in targets]) target_bbox = torch.cat([v['boxes'] for v in targets]) alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * out_prob ** gamma * -(1 - out_prob + 1e-08).log() pos_cost_class = alpha * (1 - out_prob) ** gamma * -(out_prob + 1e-08).log() class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + 
self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v['boxes']) for v in targets] indices = [linear_sum_assignment(c[i]) for (i, c) in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for (i, j) in indices] class GroundingDinoLoss(nn.Module): def __init__(self, matcher, num_classes, focal_alpha, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses def loss_labels(self, outputs, targets, indices, num_boxes): if 'logits' not in outputs: raise KeyError('No logits were found in the outputs') source_logits = outputs['logits'] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t['class_labels'][J] for (t, (_, J)) in zip(targets, indices)]) target_classes = torch.full(source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] losses = {'loss_ce': loss_ce} return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): logits = outputs['logits'] device = logits.device target_lengths = torch.as_tensor([len(v['class_labels']) for v in targets], device=device) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): if 'pred_boxes' not in outputs: raise KeyError('No predicted boxes found in outputs') idx = self._get_source_permutation_idx(indices) source_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for (t, (_, i)) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes return losses def _get_source_permutation_idx(self, indices): batch_idx = torch.cat([torch.full_like(source, i) for (i, (source, _)) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return (batch_idx, source_idx) def _get_target_permutation_idx(self, indices): batch_idx = torch.cat([torch.full_like(target, i) for (i, (_, target)) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return (batch_idx, target_idx) def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = {'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes} if loss not in loss_map: raise ValueError(f'Loss {loss} not supported') return loss_map[loss](outputs, targets, indices, num_boxes) def forward(self, outputs, targets): outputs_without_aux = {k: v for (k, v) in outputs.items() if k != 'auxiliary_outputs' and k != 'enc_outputs'} indices = 
self.matcher(outputs_without_aux, targets) num_boxes = sum((len(t['class_labels']) for t in targets)) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) world_size = 1 if is_accelerate_available(): if PartialState._shared_state != {}: num_boxes = reduce(num_boxes) world_size = PartialState().num_processes num_boxes = torch.clamp(num_boxes / world_size, min=1).item() losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) if 'auxiliary_outputs' in outputs: for (i, auxiliary_outputs) in enumerate(outputs['auxiliary_outputs']): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f'_{i}': v for (k, v) in l_dict.items()} losses.update(l_dict) if 'enc_outputs' in outputs: enc_outputs = outputs['enc_outputs'] bin_targets = copy.deepcopy(targets) for bt in bin_targets: bt['class_labels'] = torch.zeros_like(bt['class_labels']) indices = self.matcher(enc_outputs, bin_targets) for loss in self.losses: l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes) l_dict = {k + '_enc': v for (k, v) in l_dict.items()} losses.update(l_dict) return losses @add_start_docstrings('\n Grounding DINO Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top,\n for tasks such as COCO detection.\n ', GROUNDING_DINO_START_DOCSTRING) class GroundingDinoForObjectDetection(GroundingDinoPreTrainedModel): _tied_weights_keys = ['bbox_embed\\.[1-9]\\d*', 'model\\.decoder\\.bbox_embed\\.[0-9]\\d*'] def __init__(self, config: GroundingDinoConfig): super().__init__(config) self.model = GroundingDinoModel(config) _class_embed = GroundingDinoContrastiveEmbedding(config) if config.decoder_bbox_embed_share: _bbox_embed = GroundingDinoMLPPredictionHead(input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3) self.bbox_embed = nn.ModuleList([_bbox_embed for _ in range(config.decoder_layers)]) else: for _ in range(config.decoder_layers): _bbox_embed = GroundingDinoMLPPredictionHead(input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3) self.bbox_embed = nn.ModuleList([_bbox_embed for _ in range(config.decoder_layers)]) self.class_embed = nn.ModuleList([_class_embed for _ in range(config.decoder_layers)]) self.model.decoder.bbox_embed = self.bbox_embed self.model.decoder.class_embed = self.class_embed self.post_init() @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): return [{'logits': a, 'pred_boxes': b} for (a, b) in zip(outputs_class[:-1], outputs_coord[:-1])] @add_start_docstrings_to_model_forward(GROUNDING_DINO_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=GroundingDinoObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: torch.FloatTensor, input_ids: torch.LongTensor, token_type_ids: torch.LongTensor=None, attention_mask: torch.LongTensor=None, pixel_mask: Optional[torch.BoolTensor]=None, encoder_outputs: Optional[Union[GroundingDinoEncoderOutput, Tuple]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: List[Dict[str, Union[torch.LongTensor, torch.FloatTensor]]]=None): return_dict = return_dict if return_dict is not None else self.config.use_return_dict if attention_mask is None: attention_mask = torch.ones_like(input_ids) outputs = 
self.model(pixel_values=pixel_values, input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, pixel_mask=pixel_mask, encoder_outputs=encoder_outputs, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) idx = 5 + (1 if output_attentions else 0) + (1 if output_hidden_states else 0) enc_text_hidden_state = outputs.encoder_last_hidden_state_text if return_dict else outputs[idx] hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2] init_reference_points = outputs.init_reference_points if return_dict else outputs[1] inter_references_points = outputs.intermediate_reference_points if return_dict else outputs[3] outputs_classes = [] outputs_coords = [] num_levels = hidden_states.shape[1] for level in range(num_levels): if level == 0: reference = init_reference_points else: reference = inter_references_points[:, level - 1] reference = torch.special.logit(reference, eps=1e-05) outputs_class = self.class_embed[level](vision_hidden_state=hidden_states[:, level], text_hidden_state=enc_text_hidden_state, text_token_mask=attention_mask.bool()) delta_bbox = self.bbox_embed[level](hidden_states[:, level]) reference_coordinates = reference.shape[-1] if reference_coordinates == 4: outputs_coord_logits = delta_bbox + reference elif reference_coordinates == 2: delta_bbox[..., :2] += reference outputs_coord_logits = delta_bbox else: raise ValueError(f'reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}') outputs_coord = outputs_coord_logits.sigmoid() outputs_classes.append(outputs_class) outputs_coords.append(outputs_coord) outputs_class = torch.stack(outputs_classes) outputs_coord = torch.stack(outputs_coords) logits = outputs_class[-1] pred_boxes = outputs_coord[-1] (loss, loss_dict, auxiliary_outputs) = (None, None, None) if labels is not None: matcher = GroundingDinoHungarianMatcher(class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost) losses = ['labels', 'boxes', 'cardinality'] criterion = GroundingDinoLoss(matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses) criterion.to(self.device) outputs_loss = {} outputs_loss['logits'] = logits outputs_loss['pred_boxes'] = pred_boxes if self.config.auxiliary_loss: auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss['auxiliary_outputs'] = auxiliary_outputs if self.config.two_stage: enc_outputs_coord = outputs[-1].sigmoid() outputs_loss['enc_outputs'] = {'logits': outputs[-2], 'pred_boxes': enc_outputs_coord} loss_dict = criterion(outputs_loss, labels) weight_dict = {'loss_ce': 1, 'loss_bbox': self.config.bbox_loss_coefficient} weight_dict['loss_giou'] = self.config.giou_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f'_{i}': v for (k, v) in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum((loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs tuple_outputs = (loss, loss_dict) + output if loss is not None else output return tuple_outputs dict_outputs = GroundingDinoObjectDetectionOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, last_hidden_state=outputs.last_hidden_state, 
auxiliary_outputs=auxiliary_outputs, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, encoder_last_hidden_state_vision=outputs.encoder_last_hidden_state_vision, encoder_last_hidden_state_text=outputs.encoder_last_hidden_state_text, encoder_vision_hidden_states=outputs.encoder_vision_hidden_states, encoder_text_hidden_states=outputs.encoder_text_hidden_states, encoder_attentions=outputs.encoder_attentions, intermediate_hidden_states=outputs.intermediate_hidden_states, intermediate_reference_points=outputs.intermediate_reference_points, init_reference_points=outputs.init_reference_points, enc_outputs_class=outputs.enc_outputs_class, enc_outputs_coord_logits=outputs.enc_outputs_coord_logits) return dict_outputs # File: transformers-main/src/transformers/models/grounding_dino/processing_grounding_dino.py """""" import pathlib import sys from typing import Dict, List, Optional, Tuple, Union from ...image_processing_utils import BatchFeature from ...image_transforms import center_to_corners_format from ...image_utils import AnnotationFormat, ImageInput from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin if sys.version_info >= (3, 11): from typing import Unpack else: from typing_extensions import Unpack from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput from ...utils import TensorType, is_torch_available if is_torch_available(): import torch AnnotationType = Dict[str, Union[int, str, List[Dict]]] def get_phrases_from_posmap(posmaps, input_ids): left_idx = 0 right_idx = posmaps.shape[-1] - 1 posmaps = posmaps.clone() posmaps[:, 0:left_idx + 1] = False posmaps[:, right_idx:] = False token_ids = [] for posmap in posmaps: non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist() token_ids.append([input_ids[i] for i in non_zero_idx]) return token_ids class GroundingDinoImagesKwargs(ImagesKwargs, total=False): annotations: Optional[Union[AnnotationType, List[AnnotationType]]] return_segmentation_masks: Optional[bool] masks_path: Optional[Union[str, pathlib.Path]] do_convert_annotations: Optional[bool] format: Optional[Union[str, AnnotationFormat]] class GroundingDinoProcessorKwargs(ProcessingKwargs, total=False): images_kwargs: GroundingDinoImagesKwargs _defaults = {'text_kwargs': {'add_special_tokens': True, 'padding': False, 'stride': 0, 'return_overflowing_tokens': False, 'return_special_tokens_mask': False, 'return_offsets_mapping': False, 'return_token_type_ids': True, 'return_length': False, 'verbose': True}} class GroundingDinoProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] image_processor_class = 'GroundingDinoImageProcessor' tokenizer_class = 'AutoTokenizer' def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[GroundingDinoProcessorKwargs]) -> BatchEncoding: if images is None and text is None: raise ValueError('You must specify either text or images.') output_kwargs = self._merge_kwargs(GroundingDinoProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) if images is not None: encoding_image_processor = self.image_processor(images, **output_kwargs['images_kwargs']) else: encoding_image_processor = BatchFeature() if text is not None: text_encoding = self.tokenizer(text=text, **output_kwargs['text_kwargs']) else: text_encoding = 
BatchEncoding() text_encoding.update(encoding_image_processor) return text_encoding def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) def post_process_grounded_object_detection(self, outputs, input_ids, box_threshold: float=0.25, text_threshold: float=0.25, target_sizes: Union[TensorType, List[Tuple]]=None): (logits, boxes) = (outputs.logits, outputs.pred_boxes) if target_sizes is not None: if len(logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') probs = torch.sigmoid(logits) scores = torch.max(probs, dim=-1)[0] boxes = center_to_corners_format(boxes) if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: (img_h, img_w) = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for (idx, (s, b, p)) in enumerate(zip(scores, boxes, probs)): score = s[s > box_threshold] box = b[s > box_threshold] prob = p[s > box_threshold] label_ids = get_phrases_from_posmap(prob > text_threshold, input_ids[idx]) label = self.batch_decode(label_ids) results.append({'scores': score, 'labels': label, 'boxes': box}) return results # File: transformers-main/src/transformers/models/groupvit/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _import_structure = {'configuration_groupvit': ['GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_groupvit'] = ['GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_groupvit'] = ['TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel'] if TYPE_CHECKING: from .configuration_groupvit import GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/groupvit/configuration_groupvit.py """""" import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import 
OnnxConfig from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType logger = logging.get_logger(__name__) class GroupViTTextConfig(PretrainedConfig): model_type = 'groupvit_text_model' def __init__(self, vocab_size=49408, hidden_size=256, intermediate_size=1024, num_hidden_layers=12, num_attention_heads=4, max_position_embeddings=77, hidden_act='quick_gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=49406, eos_token_id=49407, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.dropout = dropout self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'groupvit': config_dict = config_dict['text_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class GroupViTVisionConfig(PretrainedConfig): model_type = 'groupvit_vision_model' def __init__(self, hidden_size=384, intermediate_size=1536, depths=[6, 3, 3], num_hidden_layers=12, num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8], num_attention_heads=6, image_size=224, patch_size=16, num_channels=3, hidden_act='gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, assign_eps=1.0, assign_mlp_ratio=[0.5, 4], **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.depths = depths if num_hidden_layers != sum(depths): logger.warning(f'Manually setting num_hidden_layers to {num_hidden_layers}, but we expect num_hidden_layers = sum(depth) = {sum(depths)}') self.num_hidden_layers = num_hidden_layers self.num_group_tokens = num_group_tokens self.num_output_groups = num_output_groups self.num_attention_heads = num_attention_heads self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.assign_eps = assign_eps self.assign_mlp_ratio = assign_mlp_ratio @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'groupvit': config_dict = config_dict['vision_config'] 
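# For composite `groupvit` checkpoints the vision settings are nested under `vision_config`, so only that
# sub-dict is kept above; the check below then warns if the remaining `model_type` still differs from this
# class's `model_type` (`groupvit_vision_model`).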
if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class GroupViTConfig(PretrainedConfig): model_type = 'groupvit' def __init__(self, text_config=None, vision_config=None, projection_dim=256, projection_intermediate_dim=4096, logit_scale_init_value=2.6592, **kwargs): text_config_dict = kwargs.pop('text_config_dict', None) vision_config_dict = kwargs.pop('vision_config_dict', None) super().__init__(**kwargs) if text_config_dict is not None: if text_config is None: text_config = {} _text_config_dict = GroupViTTextConfig(**text_config_dict).to_dict() for (key, value) in _text_config_dict.items(): if key in text_config and value != text_config[key] and (key not in ['transformers_version']): if key in text_config_dict: message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["{key}"]` will be used instead.' else: message = f'`text_config_dict` is provided which will be used to initialize `GroupViTTextConfig`. The value `text_config["{key}"]` will be overridden.' logger.info(message) text_config.update(_text_config_dict) if vision_config_dict is not None: if vision_config is None: vision_config = {} _vision_config_dict = GroupViTVisionConfig(**vision_config_dict).to_dict() if 'id2label' in _vision_config_dict: _vision_config_dict['id2label'] = {str(key): value for (key, value) in _vision_config_dict['id2label'].items()} for (key, value) in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and (key not in ['transformers_version']): if key in vision_config_dict: message = f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different values. The value `vision_config_dict["{key}"]` will be used instead.' else: message = f'`vision_config_dict` is provided which will be used to initialize `GroupViTVisionConfig`. The value `vision_config["{key}"]` will be overridden.' logger.info(message) vision_config.update(_vision_config_dict) if text_config is None: text_config = {} logger.info('`text_config` is `None`. Initializing the `GroupViTTextConfig` with default values.') if vision_config is None: vision_config = {} logger.info('`vision_config` is `None`. 
initializing the `GroupViTVisionConfig` with default values.') self.text_config = GroupViTTextConfig(**text_config) self.vision_config = GroupViTVisionConfig(**vision_config) self.projection_dim = projection_dim self.projection_intermediate_dim = projection_intermediate_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_range = 0.02 self.initializer_factor = 1.0 self.output_segmentation = False @classmethod def from_text_vision_configs(cls, text_config: GroupViTTextConfig, vision_config: GroupViTVisionConfig, **kwargs): return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) class GroupViTOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('attention_mask', {0: 'batch', 1: 'sequence'})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('logits_per_image', {0: 'batch'}), ('logits_per_text', {0: 'batch'}), ('text_embeds', {0: 'batch'}), ('image_embeds', {0: 'batch'})]) @property def atol_for_validation(self) -> float: return 0.0001 def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=-1, seq_length: int=-1, framework: Optional['TensorType']=None) -> Mapping[str, Any]: text_input_dict = super().generate_dummy_inputs(processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework) image_input_dict = super().generate_dummy_inputs(processor.image_processor, batch_size=batch_size, framework=framework) return {**text_input_dict, **image_input_dict} @property def default_onnx_opset(self) -> int: return 14 # File: transformers-main/src/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py """""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def rename_key(name): if 'img_encoder.pos_embed' in name: name = name.replace('img_encoder.pos_embed', 'vision_model.embeddings.position_embeddings') if 'img_encoder.patch_embed.proj' in name: name = name.replace('img_encoder.patch_embed.proj', 'vision_model.embeddings.patch_embeddings.projection') if 'img_encoder.patch_embed.norm' in name: name = name.replace('img_encoder.patch_embed.norm', 'vision_model.embeddings.layernorm') if 'img_encoder.layers' in name: name = name.replace('img_encoder.layers', 'vision_model.encoder.stages') if 'blocks' in name and 'res' not in name: name = name.replace('blocks', 'layers') if 'attn' in name and 'pre_assign' not in name: name = name.replace('attn', 'self_attn') if 'proj' in name and 'self_attn' in name and ('text' not in name): name = name.replace('proj', 'out_proj') if 'pre_assign_attn.attn.proj' in name: name = name.replace('pre_assign_attn.attn.proj', 'pre_assign_attn.attn.out_proj') if 'norm1' in name: name = name.replace('norm1', 'layer_norm1') if 'norm2' in name and 'pre_assign' not in name: name = name.replace('norm2', 'layer_norm2') if 'img_encoder.norm' in name: name = name.replace('img_encoder.norm', 'vision_model.layernorm') if 'text_encoder.token_embedding' in name: name = name.replace('text_encoder.token_embedding', 'text_model.embeddings.token_embedding') if 'text_encoder.positional_embedding' in name: name = name.replace('text_encoder.positional_embedding', 'text_model.embeddings.position_embedding.weight') if 'text_encoder.transformer.resblocks.' 
in name: name = name.replace('text_encoder.transformer.resblocks.', 'text_model.encoder.layers.') if 'ln_1' in name: name = name.replace('ln_1', 'layer_norm1') if 'ln_2' in name: name = name.replace('ln_2', 'layer_norm2') if 'c_fc' in name: name = name.replace('c_fc', 'fc1') if 'c_proj' in name: name = name.replace('c_proj', 'fc2') if 'text_encoder' in name: name = name.replace('text_encoder', 'text_model') if 'ln_final' in name: name = name.replace('ln_final', 'final_layer_norm') if 'img_projector.linear_hidden.' in name: name = name.replace('img_projector.linear_hidden.', 'visual_projection.') if 'img_projector.linear_out.' in name: name = name.replace('img_projector.linear_out.', 'visual_projection.3.') if 'text_projector.linear_hidden' in name: name = name.replace('text_projector.linear_hidden', 'text_projection') if 'text_projector.linear_out' in name: name = name.replace('text_projector.linear_out', 'text_projection.3') return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if 'qkv' in key: key_split = key.split('.') (stage_num, layer_num) = (int(key_split[2]), int(key_split[4])) dim = config.vision_config.hidden_size if 'weight' in key: orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :] orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight'] = val[dim:dim * 2, :] orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :] else: orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim] orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim:dim * 2] orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:] elif 'in_proj' in key: key_split = key.split('.') layer_num = int(key_split[3]) dim = config.text_config.hidden_size if 'weight' in key: orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :] orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[dim:dim * 2, :] orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :] else: orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim] orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim:dim * 2] orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:] else: new_name = rename_key(key) if 'text_projection.0' in new_name or 'text_projection.3' in new_name or 'visual_projection.0' in new_name or ('visual_projection.3' in new_name): orig_state_dict[new_name] = val.squeeze_() else: orig_state_dict[new_name] = val return orig_state_dict def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name='groupvit-gcc-yfcc', push_to_hub=False): config = GroupViTConfig() model = GroupViTModel(config).eval() state_dict = torch.load(checkpoint_path, map_location='cpu')['model'] new_state_dict = convert_state_dict(state_dict, config) (missing_keys, unexpected_keys) = model.load_state_dict(new_state_dict, strict=False) assert missing_keys == 
['text_model.embeddings.position_ids'] assert unexpected_keys == ['multi_label_logit_scale'] or len(unexpected_keys) == 0 processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32') image = prepare_img() inputs = processor(text=['a photo of a cat', 'a photo of a dog'], images=image, padding=True, return_tensors='pt') with torch.no_grad(): outputs = model(**inputs) if model_name == 'groupvit-gcc-yfcc': expected_logits = torch.tensor([[13.3523, 6.3629]]) elif model_name == 'groupvit-gcc-redcaps': expected_logits = torch.tensor([[16.1873, 8.623]]) else: raise ValueError(f'Model name {model_name} not supported.') assert torch.allclose(outputs.logits_per_image, expected_logits, atol=0.001) processor.save_pretrained(pytorch_dump_folder_path) model.save_pretrained(pytorch_dump_folder_path) print('Successfully saved processor and model to', pytorch_dump_folder_path) if push_to_hub: print('Pushing to the hub...') processor.push_to_hub(model_name, organization='nielsr') model.push_to_hub(model_name, organization='nielsr') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument('--model_name', default='groupvit-gcc-yfcc', type=str, help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'") parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.') args = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) # File: transformers-main/src/transformers/models/groupvit/modeling_groupvit.py """""" import collections.abc from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'nvidia/groupvit-gcc-yfcc' def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def groupvit_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 def hard_softmax(logits: torch.Tensor, dim: int): y_soft = logits.softmax(dim) index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft return ret def gumbel_softmax(logits: torch.Tensor, tau: float=1, hard: bool=False, dim: int=-1) -> torch.Tensor: gumbel_dist = torch.distributions.gumbel.Gumbel(torch.tensor(0.0, device=logits.device, dtype=logits.dtype), torch.tensor(1.0, device=logits.device,
dtype=logits.dtype)) gumbels = gumbel_dist.sample(logits.shape) gumbels = (logits + gumbels) / tau y_soft = gumbels.softmax(dim) if hard: index = y_soft.max(dim, keepdim=True)[1] y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0) ret = y_hard - y_soft.detach() + y_soft else: ret = y_soft return ret def resize_attention_map(attentions, height, width, align_corners=False): scale = (height * width // attentions.shape[2]) ** 0.5 if height > width: feat_width = int(np.round(width / scale)) feat_height = attentions.shape[2] // feat_width else: feat_height = int(np.round(height / scale)) feat_width = attentions.shape[2] // feat_height batch_size = attentions.shape[0] groups = attentions.shape[1] attentions = attentions.reshape(batch_size, groups, feat_height, feat_width) attentions = nn.functional.interpolate(attentions, size=(height, width), mode='bilinear', align_corners=align_corners) return attentions def get_grouping_from_attentions(attentions, hw_shape): attn_maps = [] with torch.no_grad(): prev_attn_masks = None for attn_masks in attentions: attn_masks = attn_masks.permute(0, 2, 1).contiguous() if prev_attn_masks is None: prev_attn_masks = attn_masks else: prev_attn_masks = prev_attn_masks @ attn_masks cur_attn_map = resize_attention_map(prev_attn_masks.permute(0, 2, 1).contiguous(), *hw_shape) attn_maps.append(cur_attn_map) final_grouping = attn_maps[-1] return final_grouping class GroupViTCrossAttentionLayer(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.attn = GroupViTAttention(config) self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = GroupViTMLP(config) self.norm_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, query, key): x = query x = x + self.attn(query, encoder_hidden_states=key)[0] x = x + self.mlp(self.norm2(x)) x = self.norm_post(x) return x class GroupViTAssignAttention(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.scale = config.hidden_size ** (-0.5) self.q_proj = nn.Linear(config.hidden_size, config.hidden_size) self.k_proj = nn.Linear(config.hidden_size, config.hidden_size) self.v_proj = nn.Linear(config.hidden_size, config.hidden_size) self.proj = nn.Linear(config.hidden_size, config.hidden_size) self.assign_eps = config.assign_eps def get_attn(self, attn, gumbel=True, hard=True): if gumbel and self.training: attn = gumbel_softmax(attn, dim=-2, hard=hard) elif hard: attn = hard_softmax(attn, dim=-2) else: attn = nn.functional.softmax(attn, dim=-2) return attn def forward(self, query, key): value = key query = self.q_proj(query) key = self.k_proj(key) value = self.v_proj(value) raw_attn = query @ key.transpose(-2, -1) * self.scale attn = self.get_attn(raw_attn) soft_attn = self.get_attn(raw_attn, gumbel=False, hard=False) attn = attn / (attn.sum(dim=-1, keepdim=True) + self.assign_eps) out = attn @ value out = self.proj(out) return (out, soft_attn) class GroupViTTokenAssign(nn.Module): def __init__(self, config: GroupViTVisionConfig, num_group_token, num_output_group): super().__init__() self.num_output_group = num_output_group self.norm_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) assign_mlp_ratio = config.assign_mlp_ratio if isinstance(config.assign_mlp_ratio, collections.abc.Iterable) else (config.assign_mlp_ratio, config.assign_mlp_ratio) (tokens_dim, channels_dim) = [int(x * config.hidden_size) for x in assign_mlp_ratio] self.mlp_inter = 
GroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group) self.norm_post_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.norm_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pre_assign_attn = GroupViTCrossAttentionLayer(config) self.assign = GroupViTAssignAttention(config) self.norm_new_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp_channels = GroupViTMLP(config, config.hidden_size, channels_dim, config.hidden_size) def project_group_token(self, group_tokens): projected_group_tokens = self.mlp_inter(group_tokens) projected_group_tokens = self.norm_post_tokens(projected_group_tokens) return projected_group_tokens def forward(self, image_tokens, group_tokens): group_tokens = self.norm_tokens(group_tokens) image_tokens = self.norm_x(image_tokens) projected_group_tokens = self.project_group_token(group_tokens) projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens) (new_image_tokens, attention) = self.assign(projected_group_tokens, image_tokens) new_image_tokens += projected_group_tokens new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens)) return (new_image_tokens, attention) @dataclass class GroupViTModelOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None segmentation_logits: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) class GroupViTPatchEmbeddings(nn.Module): def __init__(self, image_size: int=224, patch_size: Union[int, Tuple[int, int]]=16, num_channels: int=3, embed_dim: int=768): super().__init__() image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: (batch_size, num_channels, height, width) = pixel_values.shape if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).") x = self.projection(pixel_values).flatten(2).transpose(1, 2) return x class GroupViTVisionEmbeddings(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.patch_embeddings = GroupViTPatchEmbeddings(image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.hidden_size) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches, config.hidden_size)) self.dropout = nn.Dropout(config.dropout) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.patch_size = config.patch_size self.config = config def 
interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] num_positions = self.position_embeddings.shape[1] if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embeddings patch_pos_embed = self.position_embeddings dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return patch_pos_embed def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: (batch_size, num_channels, height, width) = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) embeddings = self.layernorm(embeddings) (batch_size, seq_len, _) = embeddings.size() if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings class GroupViTTextEmbeddings(nn.Module): def __init__(self, config: GroupViTTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class GroupViTStage(nn.Module): def __init__(self, config: GroupViTVisionConfig, depth: int, num_prev_group_token: int, num_group_token: int, num_output_group: int): super().__init__() self.depth = depth self.num_group_token = num_group_token if num_group_token > 0: self.group_token = nn.Parameter(torch.zeros(1, num_group_token, config.hidden_size)) else: self.group_token = None self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(depth)]) if num_group_token > 0: self.downsample = GroupViTTokenAssign(config=config, num_group_token=num_group_token, num_output_group=num_output_group) else: self.downsample = None if num_prev_group_token > 0 and num_group_token > 0: self.group_projector = nn.Sequential(nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps), GroupViTMixerMLP(config, num_prev_group_token, config.hidden_size // 2, num_group_token)) else: self.group_projector = None @property def with_group_token(self): return self.group_token is not None def split_x(self, x): if self.with_group_token: return (x[:, :-self.num_group_token], x[:, -self.num_group_token:]) else: return (x, None) def concat_x(self, x: torch.Tensor, 
group_token: Optional[torch.Tensor]=None) -> torch.Tensor: if group_token is None: return x return torch.cat([x, group_token], dim=1) def forward(self, hidden_states: torch.Tensor, prev_group_token: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: if self.with_group_token: group_token = self.group_token.expand(hidden_states.size(0), -1, -1) if self.group_projector is not None: group_token = group_token + self.group_projector(prev_group_token) else: group_token = None x = hidden_states cat_x = self.concat_x(x, group_token) for layer in self.layers: layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None) cat_x = layer_out[0] (x, group_token) = self.split_x(cat_x) attention = None if self.downsample is not None: (x, attention) = self.downsample(x, group_token) outputs = (x, group_token) if output_attentions: outputs = outputs + (attention,) return outputs class GroupViTMLP(nn.Module): def __init__(self, config: GroupViTVisionConfig, hidden_size: Optional[int]=None, intermediate_size: Optional[int]=None, output_size: Optional[int]=None): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] hidden_size = hidden_size if hidden_size is not None else config.hidden_size intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size output_size = output_size if output_size is not None else hidden_size self.fc1 = nn.Linear(hidden_size, intermediate_size) self.fc2 = nn.Linear(intermediate_size, output_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class GroupViTMixerMLP(GroupViTMLP): def forward(self, x): x = super().forward(x.transpose(1, 2)) return x.transpose(1, 2) class GroupViTAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, tgt_len, embed_dim) = hidden_states.size() is_cross_attention = encoder_hidden_states is not None query_states = self.q_proj(hidden_states) * self.scale if is_cross_attention: key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz) value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) 
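# Fold the head dimension into the batch dimension, giving shape (bsz * num_heads, seq_len, head_dim),
# so the per-head attention scores and outputs can be computed with single batched matmuls (torch.bmm).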
proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped) class GroupViTEncoderLayer(nn.Module): def __init__(self, config: GroupViTConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = GroupViTAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = GroupViTMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class GroupViTPreTrainedModel(PreTrainedModel): config_class = GroupViTConfig base_model_prefix = 'groupvit' supports_gradient_checkpointing = True def _init_weights(self, module): init_range = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, 
std=init_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) factor = self.config.initializer_factor if isinstance(module, GroupViTTextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, GroupViTAttention): factor = self.config.initializer_factor in_proj_std = module.embed_dim ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor out_proj_std = module.embed_dim ** (-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, GroupViTMLP): factor = self.config.initializer_factor in_proj_std = module.config.hidden_size ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor fc_std = (2 * module.config.hidden_size) ** (-0.5) * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) GROUPVIT_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GROUPVIT_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' GROUPVIT_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. 
Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' GROUPVIT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`CLIPImageProcessor.__call__`] for details.\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class GroupViTVisionEncoder(nn.Module): def __init__(self, config: GroupViTVisionConfig) -> None: super().__init__() self.config = config self.stages = nn.ModuleList([GroupViTStage(config=config, depth=config.depths[i], num_group_token=config.num_group_tokens[i], num_output_group=config.num_output_groups[i], num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0) for i in range(len(config.depths))]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict all_hidden_states = () if output_hidden_states else None all_groupings = () if output_attentions else None group_tokens = None for (i, stage) in enumerate(self.stages): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = stage(hidden_states, group_tokens, output_attentions) hidden_states = layer_outputs[0] group_tokens = layer_outputs[1] if output_attentions and layer_outputs[2] is not None: all_groupings = all_groupings + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings) class GroupViTTextEncoder(nn.Module): def __init__(self, config: GroupViTTextConfig): super().__init__() self.config = config self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + 
(hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class GroupViTTextTransformer(nn.Module): def __init__(self, config: GroupViTTextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = GroupViTTextEmbeddings(config) self.encoder = GroupViTTextEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.eos_token_id = config.eos_token_id @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig) def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError('You have to specify input_ids') input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) causal_attention_mask = _create_4d_causal_attention_mask(input_shape, hidden_states.dtype, device=hidden_states.device) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) if self.eos_token_id == 2: pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1)] else: pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id).int().argmax(dim=-1)] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class GroupViTTextModel(GroupViTPreTrainedModel): config_class = GroupViTTextConfig def __init__(self, config: GroupViTTextConfig): super().__init__(config) self.text_model = GroupViTTextTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig) def forward(self, input_ids: Optional[torch.Tensor]=None, 
attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: return self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) class GroupViTVisionTransformer(nn.Module): def __init__(self, config: GroupViTVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = GroupViTVisionEmbeddings(config) self.encoder = GroupViTVisionEncoder(config) self.layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values) encoder_outputs = self.encoder(hidden_states=hidden_states, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.layernorm(last_hidden_state) pooled_output = last_hidden_state.mean(dim=1) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class GroupViTVisionModel(GroupViTPreTrainedModel): config_class = GroupViTVisionConfig main_input_name = 'pixel_values' def __init__(self, config: GroupViTVisionConfig): super().__init__(config) self.vision_model = GroupViTVisionTransformer(config) self.post_init() def get_input_embeddings(self) -> GroupViTPatchEmbeddings: return self.vision_model.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) @add_start_docstrings(GROUPVIT_START_DOCSTRING) class GroupViTModel(GroupViTPreTrainedModel): config_class = GroupViTConfig def __init__(self, config: GroupViTConfig): super().__init__(config) if not isinstance(config.text_config, GroupViTTextConfig): raise TypeError(f'config.text_config is expected to be of type GroupViTTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.vision_config, 
GroupViTVisionConfig): raise TypeError(f'config.vision_config is expected to be of type GroupViTVisionConfig but is of type {type(config.vision_config)}.') text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.projection_intermediate_dim = config.projection_intermediate_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = GroupViTTextTransformer(text_config) self.vision_model = GroupViTVisionTransformer(vision_config) self.visual_projection = nn.Sequential(nn.Linear(self.vision_embed_dim, self.projection_intermediate_dim, bias=True), nn.BatchNorm1d(self.projection_intermediate_dim), nn.ReLU(inplace=True), nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True)) self.text_projection = nn.Sequential(nn.Linear(self.text_embed_dim, self.projection_intermediate_dim, bias=True), nn.BatchNorm1d(self.projection_intermediate_dim), nn.ReLU(inplace=True), nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True)) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.post_init() @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING) def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = vision_outputs[1] image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=GroupViTModelOutput, config_class=GroupViTConfig) def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_segmentation: 
Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, GroupViTModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_segmentation = output_segmentation if output_segmentation is not None else self.config.output_segmentation if output_segmentation: output_attentions = True output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True) logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() seg_logits = None if output_segmentation: image_group_embeds = vision_outputs[0] image_group_embeds = self.visual_projection(image_group_embeds.reshape(-1, image_group_embeds.shape[-1])) if output_hidden_states: attentions = vision_outputs[3] else: attentions = vision_outputs[2] grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:]) image_group_embeds = image_group_embeds / image_group_embeds.norm(dim=-1, keepdim=True) logits_per_image_group = torch.matmul(image_group_embeds, text_embeds.t()) * logit_scale logits_per_image_group = logits_per_image_group.reshape(image_embeds.shape[0], -1, text_embeds.shape[0]).permute(0, 2, 1) flatten_grouping = grouping.reshape(grouping.shape[0], grouping.shape[1], -1) seg_logits = torch.matmul(logits_per_image_group, flatten_grouping) * logit_scale seg_logits = seg_logits.reshape(seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3]) loss = None if return_loss: loss = groupvit_loss(logits_per_text) if not return_dict: if seg_logits is not None: output = (logits_per_image, logits_per_text, seg_logits, text_embeds, image_embeds, text_outputs, vision_outputs) else: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return (loss,) + output if loss is not None else output return GroupViTModelOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, segmentation_logits=seg_logits, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) # File: transformers-main/src/transformers/models/groupvit/modeling_tf_groupvit.py """""" from __future__ import annotations import collections.abc import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import 
check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_tensorflow_probability_available, logging, replace_return_docstrings from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig logger = logging.get_logger(__name__) if is_tensorflow_probability_available(): try: import tensorflow_probability as tfp _ = tfp.distributions.Normal(loc=0.0, scale=1.0) except ImportError: logger.error("GroupViT models are not usable since `tensorflow_probability` can't be loaded. It seems you have `tensorflow_probability` installed with the wrong tensorflow version. Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability.") else: try: import tensorflow_probability as tfp _ = tfp.distributions.Normal(loc=0.0, scale=1.0) except ImportError: pass _CHECKPOINT_FOR_DOC = 'nvidia/groupvit-gcc-yfcc' LARGE_NEGATIVE = -100000000.0 def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int]=None): src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE def contrastive_loss(logits: tf.Tensor) -> tf.Tensor: return tf.math.reduce_mean(keras.metrics.sparse_categorical_crossentropy(y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True)) def groupvit_loss(similarity: tf.Tensor) -> tf.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(tf.transpose(similarity)) return (caption_loss + image_loss) / 2.0 def hard_softmax(logits: tf.Tensor, dim: int) -> tf.Tensor: y_soft = stable_softmax(logits, dim) index = tf.argmax(y_soft, dim) y_hard = tf.one_hot(index, depth=shape_list(logits)[dim], axis=range(len(shape_list(logits)))[dim], dtype=y_soft.dtype) ret = y_hard - tf.stop_gradient(y_soft) + y_soft return ret def gumbel_softmax(logits: tf.Tensor, tau: float=1, hard: bool=False, dim: int=-1) -> tf.Tensor: gumbel_dist = tfp.distributions.Gumbel(0.0, 1.0) gumbels = gumbel_dist.sample(tf.shape(logits), dtype=logits.dtype) gumbels = (logits + gumbels) / tau y_soft = stable_softmax(gumbels, dim) if hard: index = tf.argmax(y_soft, dim) y_hard = tf.one_hot(index, depth=shape_list(logits)[dim], axis=range(len(shape_list(logits)))[dim], dtype=y_soft.dtype) ret = y_hard - tf.stop_gradient(y_soft) + y_soft else: ret = y_soft return ret def resize_attention_map(attentions: tf.Tensor, height: int, width: int, align_corners: bool=False) -> tf.Tensor: scale = (height * width // attentions.shape[2]) ** 0.5 if height > width: feat_width = int(np.round(width / scale)) feat_height = shape_list(attentions)[2] // feat_width else: feat_height = int(np.round(height / scale)) feat_width = shape_list(attentions)[2] // feat_height batch_size = shape_list(attentions)[0] groups = shape_list(attentions)[1] attentions = tf.reshape(attentions, (batch_size, groups, feat_height, feat_width)) attentions = tf.transpose(attentions, perm=(0, 2, 3, 1)) if align_corners: attentions = tf.compat.v1.image.resize(attentions, size=(height, width), method='bilinear', align_corners=align_corners) else: attentions = tf.image.resize(attentions, size=(height, width), method='bilinear') attentions = tf.transpose(attentions, perm=(0, 3, 1, 2)) return attentions def get_grouping_from_attentions(attentions: Tuple[tf.Tensor], hw_shape: Tuple[int]) -> 
tf.Tensor: attn_maps = [] prev_attn_masks = None for attn_masks in attentions: attn_masks = tf.transpose(attn_masks, perm=(0, 2, 1)) if prev_attn_masks is None: prev_attn_masks = attn_masks else: prev_attn_masks = tf.matmul(prev_attn_masks, attn_masks) cur_attn_map = resize_attention_map(tf.transpose(prev_attn_masks, perm=(0, 2, 1)), *hw_shape) attn_maps.append(cur_attn_map) final_grouping = attn_maps[-1] return tf.stop_gradient(final_grouping) @dataclass class TFGroupViTModelOutput(ModelOutput): loss: tf.Tensor | None = None logits_per_image: tf.Tensor = None logits_per_text: tf.Tensor = None segmentation_logits: tf.Tensor = None text_embeds: tf.Tensor = None image_embeds: tf.Tensor = None text_model_output: TFBaseModelOutputWithPooling = None vision_model_output: TFBaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys())) class TFGroupViTCrossAttentionLayer(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, **kwargs): super().__init__(**kwargs) self.attn = TFGroupViTAttention(config, name='attn') self.norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='norm2') self.mlp = TFGroupViTMLP(config, name='mlp') self.norm_post = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='norm_post') self.config = config def call(self, query: tf.Tensor, key: tf.Tensor, training: bool=False) -> tf.Tensor: x = query x = x + self.attn(query, encoder_hidden_states=key)[0] x = x + self.mlp(self.norm2(x)) x = self.norm_post(x) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'attn', None) is not None: with tf.name_scope(self.attn.name): self.attn.build(None) if getattr(self, 'norm2', None) is not None: with tf.name_scope(self.norm2.name): self.norm2.build([None, None, self.config.hidden_size]) if getattr(self, 'mlp', None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, 'norm_post', None) is not None: with tf.name_scope(self.norm_post.name): self.norm_post.build([None, None, self.config.hidden_size]) class TFGroupViTAssignAttention(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, **kwargs): super().__init__(**kwargs) self.scale = config.hidden_size ** (-0.5) self.q_proj = keras.layers.Dense(config.hidden_size, name='q_proj') self.k_proj = keras.layers.Dense(config.hidden_size, name='k_proj') self.v_proj = keras.layers.Dense(config.hidden_size, name='v_proj') self.proj = keras.layers.Dense(config.hidden_size, name='proj') self.assign_eps = config.assign_eps self.config = config def get_attn(self, attn: tf.Tensor, gumbel: bool=True, hard: bool=True, training: bool=False) -> tf.Tensor: if gumbel and training: attn = gumbel_softmax(attn, dim=-2, hard=hard) elif hard: attn = hard_softmax(attn, dim=-2) else: attn = stable_softmax(attn, axis=-2) return attn def call(self, query: tf.Tensor, key: tf.Tensor, training: bool=False): value = key query = self.q_proj(query) key = self.k_proj(key) value = self.v_proj(value) raw_attn = tf.matmul(query, key, transpose_b=True) * self.scale attn = self.get_attn(raw_attn, training=training) soft_attn = self.get_attn(raw_attn, training=training, gumbel=False, hard=False) attn = attn / (tf.math.reduce_sum(attn, axis=-1, keepdims=True) + self.assign_eps) out = tf.matmul(attn, value) out = self.proj(out) return (out, soft_attn) def build(self, input_shape=None): if 
self.built: return self.built = True if getattr(self, 'q_proj', None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.config.hidden_size]) if getattr(self, 'k_proj', None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.config.hidden_size]) if getattr(self, 'v_proj', None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.config.hidden_size]) if getattr(self, 'proj', None) is not None: with tf.name_scope(self.proj.name): self.proj.build([None, None, self.config.hidden_size]) class TFGroupViTTokenAssign(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, num_group_token: int, num_output_group: int, **kwargs): super().__init__(**kwargs) self.num_output_group = num_output_group self.norm_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='norm_tokens') assign_mlp_ratio = config.assign_mlp_ratio if isinstance(config.assign_mlp_ratio, collections.abc.Iterable) else (config.assign_mlp_ratio, config.assign_mlp_ratio) (tokens_dim, channels_dim) = [int(x * config.hidden_size) for x in assign_mlp_ratio] self.mlp_inter = TFGroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group, name='mlp_inter') self.norm_post_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='norm_post_tokens') self.norm_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='norm_x') self.pre_assign_attn = TFGroupViTCrossAttentionLayer(config, name='pre_assign_attn') self.assign = TFGroupViTAssignAttention(config, name='assign') self.norm_new_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='norm_new_x') self.mlp_channels = TFGroupViTMLP(config, config.hidden_size, channels_dim, config.hidden_size, name='mlp_channels') self.config = config def project_group_token(self, group_tokens: tf.Tensor) -> tf.Tensor: projected_group_tokens = self.mlp_inter(group_tokens) projected_group_tokens = self.norm_post_tokens(projected_group_tokens) return projected_group_tokens def call(self, image_tokens: tf.Tensor, group_tokens: tf.Tensor, training: bool=False): group_tokens = self.norm_tokens(group_tokens) image_tokens = self.norm_x(image_tokens) projected_group_tokens = self.project_group_token(group_tokens) projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens) (new_image_tokens, attention) = self.assign(projected_group_tokens, image_tokens) new_image_tokens += projected_group_tokens new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens)) return (new_image_tokens, attention) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'norm_tokens', None) is not None: with tf.name_scope(self.norm_tokens.name): self.norm_tokens.build([None, None, self.config.hidden_size]) if getattr(self, 'mlp_inter', None) is not None: with tf.name_scope(self.mlp_inter.name): self.mlp_inter.build(None) if getattr(self, 'norm_post_tokens', None) is not None: with tf.name_scope(self.norm_post_tokens.name): self.norm_post_tokens.build([None, None, self.config.hidden_size]) if getattr(self, 'norm_x', None) is not None: with tf.name_scope(self.norm_x.name): self.norm_x.build([None, None, self.config.hidden_size]) if getattr(self, 'pre_assign_attn', None) is not None: with tf.name_scope(self.pre_assign_attn.name): self.pre_assign_attn.build(None) if getattr(self, 'assign', None) is not None: with tf.name_scope(self.assign.name): 
self.assign.build(None) if getattr(self, 'norm_new_x', None) is not None: with tf.name_scope(self.norm_new_x.name): self.norm_new_x.build([None, None, self.config.hidden_size]) if getattr(self, 'mlp_channels', None) is not None: with tf.name_scope(self.mlp_channels.name): self.mlp_channels.build(None) class TFGroupViTPatchEmbeddings(keras.layers.Layer): def __init__(self, config: GroupViTConfig, **kwargs): super().__init__(**kwargs) (image_size, patch_size) = (config.image_size, config.patch_size) num_channels = config.num_channels self.hidden_size = config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.num_channels = num_channels self.config = config self.projection = keras.layers.Conv2D(filters=self.hidden_size, kernel_size=patch_size, strides=patch_size, padding='valid', data_format='channels_last', use_bias=True, kernel_initializer=get_initializer(self.config.initializer_range), bias_initializer='zeros', name='projection') def call(self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool=False, training: bool=False) -> tf.Tensor: (batch_size, num_channels, height, width) = shape_list(pixel_values) if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.') if not interpolate_pos_encoding and tf.executing_eagerly() and (height != self.image_size[0] or width != self.image_size[1]): raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).") pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) projection = self.projection(pixel_values) num_patches = width // self.patch_size[1] * (height // self.patch_size[0]) embeddings = tf.reshape(tensor=projection, shape=(batch_size, num_patches, self.hidden_size)) return embeddings def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'projection', None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, None, self.num_channels]) class TFGroupViTVisionEmbeddings(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, **kwargs): super().__init__(**kwargs) self.patch_embeddings = TFGroupViTPatchEmbeddings(config, name='patch_embeddings') self.dropout = keras.layers.Dropout(rate=config.dropout, name='dropout') self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm') self.config = config def build(self, input_shape=None): num_patches = self.patch_embeddings.num_patches self.position_embeddings = self.add_weight(shape=(1, num_patches, self.config.hidden_size), initializer='zeros', trainable=True, name='position_embeddings') if self.built: return self.built = True if getattr(self, 'patch_embeddings', None) is not None: with tf.name_scope(self.patch_embeddings.name): self.patch_embeddings.build(None) if getattr(self, 'dropout', None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) if getattr(self, 'layernorm', None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, self.config.hidden_size]) def 
interpolate_pos_encoding(self, embeddings, height, width) -> tf.Tensor: (batch_size, num_patches, dim) = shape_list(embeddings) num_positions = shape_list(self.position_embeddings)[1] if num_patches == num_positions and height == width: return self.position_embeddings patch_pos_embed = self.position_embeddings h0 = height // self.config.patch_size w0 = width // self.config.patch_size patch_pos_embed = tf.image.resize(images=tf.reshape(patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)), size=(h0, w0), method='bicubic') patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim)) return patch_pos_embed def call(self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool=False, training: bool=False) -> tf.Tensor: (_, _, height, width) = shape_list(pixel_values) embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) embeddings = self.layernorm(embeddings) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings class TFGroupViTTextEmbeddings(keras.layers.Layer): def __init__(self, config: GroupViTTextConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.config = config def build(self, input_shape: tf.TensorShape=None): with tf.name_scope('token_embedding'): self.weight = self.add_weight(shape=(self.config.vocab_size, self.embed_dim), initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), trainable=True, name='weight') with tf.name_scope('position_embedding'): self.position_embedding = self.add_weight(shape=(self.config.max_position_embeddings, self.embed_dim), initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), trainable=True, name='embeddings') super().build(input_shape) def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None) -> tf.Tensor: if input_ids is None and inputs_embeds is None: raise ValueError('You have to specify either input_ids or inputs_embeds') if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embedding, indices=position_ids) position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1)) final_embeddings = inputs_embeds + position_embeds return final_embeddings class TFGroupViTStage(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, depth: int, num_prev_group_token: int, num_group_token: int, num_output_group: int, **kwargs): super().__init__(**kwargs) self.config = config self.depth = depth self.num_group_token = num_group_token self.layers = [TFGroupViTEncoderLayer(config, name=f'layers_._{i}') for i in range(depth)] if num_group_token > 0: self.downsample = TFGroupViTTokenAssign(config=config, num_group_token=num_group_token, num_output_group=num_output_group, name='downsample') else: self.downsample = None if num_prev_group_token > 0 and num_group_token > 0: self.group_projector = [keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='group_projector.0'), TFGroupViTMixerMLP(config, 
num_prev_group_token, config.hidden_size // 2, num_group_token, name='group_projector.1')] else: self.group_projector = None def build(self, input_shape=None): if self.num_group_token > 0: self.group_token = self.add_weight(shape=(1, self.num_group_token, self.config.hidden_size), initializer='zeros', trainable=True, name='group_token') else: self.group_token = None if self.built: return self.built = True if getattr(self, 'downsample', None) is not None: with tf.name_scope(self.downsample.name): self.downsample.build(None) if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) if getattr(self, 'group_projector', None) is not None: with tf.name_scope(self.group_projector[0].name): self.group_projector[0].build([None, None, self.config.hidden_size]) with tf.name_scope(self.group_projector[1].name): self.group_projector[1].build(None) @property def with_group_token(self): return self.group_token is not None def split_x(self, x: tf.Tensor) -> tf.Tensor: if self.with_group_token: return (x[:, :-self.num_group_token], x[:, -self.num_group_token:]) else: return (x, None) def concat_x(self, x: tf.Tensor, group_token: tf.Tensor | None=None) -> tf.Tensor: if group_token is None: return x return tf.concat([x, group_token], axis=1) def call(self, hidden_states: tf.Tensor, prev_group_token: tf.Tensor | None=None, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]: if self.with_group_token: group_token = tf.tile(self.group_token, multiples=(shape_list(hidden_states)[0], 1, 1)) if self.group_projector is not None: for layer in self.group_projector: prev_group_token = layer(prev_group_token) group_token = group_token + prev_group_token else: group_token = None x = hidden_states cat_x = self.concat_x(x, group_token) for layer in self.layers: layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None, output_attentions=None) cat_x = layer_out[0] (x, group_token) = self.split_x(cat_x) attention = None if self.downsample is not None: (x, attention) = self.downsample(x, group_token) outputs = (x, group_token) if output_attentions: outputs = outputs + (attention,) return outputs class TFGroupViTMLP(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, hidden_size: Optional[int]=None, intermediate_size: Optional[int]=None, output_size: Optional[int]=None, **kwargs): super().__init__(**kwargs) self.config = config self.activation_fn = get_tf_activation(config.hidden_act) hidden_size = hidden_size if hidden_size is not None else config.hidden_size intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size output_size = output_size if output_size is not None else hidden_size self.fc1 = keras.layers.Dense(intermediate_size, name='fc1') self.fc2 = keras.layers.Dense(output_size, name='fc2') self.intermediate_size = intermediate_size self.hidden_size = hidden_size def call(self, hidden_states: tf.Tensor, training: bool=False) -> tf.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'fc1', None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.hidden_size]) if getattr(self, 'fc2', None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.intermediate_size]) class TFGroupViTMixerMLP(TFGroupViTMLP): def 
call(self, x, training: bool=False): x = super().call(hidden_states=tf.transpose(x, perm=(0, 2, 1))) return tf.transpose(x, perm=(0, 2, 1)) class TFGroupViTAttention(keras.layers.Layer): def __init__(self, config: GroupViTConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.num_attention_heads = config.num_attention_heads self.attention_head_size = self.embed_dim // self.num_attention_heads if self.attention_head_size * self.num_attention_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_attention_heads}).') factor = config.initializer_factor in_proj_std = self.embed_dim ** (-0.5) * (2 * config.num_hidden_layers) ** (-0.5) * factor out_proj_std = self.embed_dim ** (-0.5) * factor self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.q_proj = keras.layers.Dense(units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name='q_proj') self.k_proj = keras.layers.Dense(units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name='k_proj') self.v_proj = keras.layers.Dense(units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name='v_proj') self.dropout = keras.layers.Dropout(rate=config.attention_dropout) self.out_proj = keras.layers.Dense(units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name='out_proj') def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor=None, causal_attention_mask: tf.Tensor=None, output_attentions: bool=None, encoder_hidden_states: tf.Tensor=None, training: bool=False) -> Tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] is_cross_attention = encoder_hidden_states is not None mixed_query_layer = self.q_proj(inputs=hidden_states) if is_cross_attention: mixed_key_layer = self.k_proj(inputs=encoder_hidden_states) mixed_value_layer = self.v_proj(inputs=encoder_hidden_states) else: mixed_key_layer = self.k_proj(inputs=hidden_states) mixed_value_layer = self.v_proj(inputs=hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if causal_attention_mask is not None: attention_scores = tf.add(attention_scores, causal_attention_mask) if attention_mask is not None: attention_scores = tf.add(attention_scores, attention_mask) _attention_probs = stable_softmax(logits=attention_scores, axis=-1) attention_probs = self.dropout(inputs=_attention_probs) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim)) attention_output = self.out_proj(attention_output) outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'q_proj', None) is not None: with 
tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, 'k_proj', None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, 'v_proj', None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, 'out_proj', None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFGroupViTEncoderLayer(keras.layers.Layer): def __init__(self, config: GroupViTConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.self_attn = TFGroupViTAttention(config, name='self_attn') self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm1') self.mlp = TFGroupViTMLP(config, name='mlp') self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm2') def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]: residual = hidden_states hidden_states = self.layer_norm1(inputs=hidden_states) attention_outputs = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training) hidden_states = attention_outputs[0] hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(inputs=hidden_states) hidden_states = self.mlp(hidden_states=hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) + attention_outputs[1:] return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attn', None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, 'layer_norm1', None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, self.embed_dim]) if getattr(self, 'mlp', None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, 'layer_norm2', None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, self.embed_dim]) class TFGroupViTTextEncoder(keras.layers.Layer): def __init__(self, config: GroupViTTextConfig, **kwargs): super().__init__(**kwargs) self.layers = [TFGroupViTEncoderLayer(config, name=f'layers_._{i}') for i in range(config.num_hidden_layers)] def call(self, hidden_states, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[Tuple, TFBaseModelOutput]: encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) def 
build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFGroupViTVisionEncoder(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, **kwargs) -> None: super().__init__(**kwargs) self.stages = [TFGroupViTStage(config=config, depth=config.depths[i], num_group_token=config.num_group_tokens[i], num_output_group=config.num_output_groups[i], num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0, name=f'stages_._{i}') for i in range(len(config.depths))] def call(self, hidden_states: tf.Tensor, output_hidden_states: bool, output_attentions: bool, return_dict: bool, training: bool=False) -> Union[tuple, TFBaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_groupings = () if output_attentions else None group_tokens = None for stage in self.stages: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = stage(hidden_states, group_tokens, output_attentions) hidden_states = layer_outputs[0] group_tokens = layer_outputs[1] if output_attentions and layer_outputs[2] is not None: all_groupings = all_groupings + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'stages', None) is not None: for layer in self.stages: with tf.name_scope(layer.name): layer.build(None) class TFGroupViTTextTransformer(keras.layers.Layer): def __init__(self, config: GroupViTTextConfig, **kwargs): super().__init__(**kwargs) self.embeddings = TFGroupViTTextEmbeddings(config, name='embeddings') self.encoder = TFGroupViTTextEncoder(config, name='encoder') self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='final_layer_norm') self.eos_token_id = config.eos_token_id self.embed_dim = config.hidden_size def call(self, input_ids: TFModelInputType, attention_mask: tf.Tensor, position_ids: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: input_shape = shape_list(input_ids) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids) (batch_size, seq_length) = input_shape causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype) attention_mask = _expand_mask(attention_mask) encoder_outputs = self.encoder(hidden_states=embedding_output, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) sequence_output = encoder_outputs[0] sequence_output = self.final_layer_norm(inputs=sequence_output) if self.eos_token_id == 2: pooled_output = tf.gather_nd(params=sequence_output, indices=tf.stack(values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1)) else: pooled_output = tf.gather_nd(params=sequence_output, indices=tf.stack(values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(tf.cast(input_ids == self.eos_token_id, 
dtype=tf.int8), axis=-1)), axis=1)) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32): diag = tf.cast(tf.fill((seq_length,), 0.0), dtype) to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype) to_mask = tf.linalg.band_part(to_mask, 0, -1) to_mask = tf.linalg.set_diag(to_mask, diagonal=diag) return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length)) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'final_layer_norm', None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) class TFGroupViTVisionTransformer(keras.layers.Layer): def __init__(self, config: GroupViTVisionConfig, **kwargs): super().__init__(**kwargs) self.embeddings = TFGroupViTVisionEmbeddings(config, name='embeddings') self.encoder = TFGroupViTVisionEncoder(config, name='encoder') self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layernorm') self.embed_dim = config.hidden_size def call(self, pixel_values: TFModelInputType, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[Tuple, TFBaseModelOutputWithPooling]: embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder(hidden_states=embedding_output, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.layernorm(last_hidden_state) pooled_output = tf.math.reduce_mean(last_hidden_state, axis=1) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'layernorm', None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, self.embed_dim]) @keras_serializable class TFGroupViTTextMainLayer(keras.layers.Layer): config_class = GroupViTTextConfig def __init__(self, config: GroupViTTextConfig, **kwargs): super().__init__(**kwargs) self.config = config self.text_model = TFGroupViTTextTransformer(config, name='text_model') def get_input_embeddings(self) -> keras.layers.Layer: return self.text_model.embeddings def set_input_embeddings(self, value: tf.Variable): self.text_model.embeddings.weight = value self.text_model.embeddings.vocab_size = shape_list(value)[0] @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, 
output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if input_ids is None: raise ValueError('You have to specify input_ids') input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) text_model_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return text_model_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'text_model', None) is not None: with tf.name_scope(self.text_model.name): self.text_model.build(None) @keras_serializable class TFGroupViTVisionMainLayer(keras.layers.Layer): config_class = GroupViTVisionConfig def __init__(self, config: GroupViTVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.vision_model = TFGroupViTVisionTransformer(config, name='vision_model') def get_input_embeddings(self) -> keras.layers.Layer: return self.vision_model.embeddings @unpack_inputs def call(self, pixel_values: TFModelInputType | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: if pixel_values is None: raise ValueError('You have to specify pixel_values') vision_model_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return vision_model_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'vision_model', None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) @keras_serializable class TFGroupViTMainLayer(keras.layers.Layer): config_class = GroupViTConfig def __init__(self, config: GroupViTConfig, **kwargs): super().__init__(**kwargs) if not isinstance(config.text_config, GroupViTTextConfig): raise TypeError(f'config.text_config is expected to be of type GroupViTTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.vision_config, GroupViTVisionConfig): raise TypeError(f'config.vision_config is expected to be of type GroupViTVisionConfig but is of type {type(config.vision_config)}.') self.config = config text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.projection_intermediate_dim = config.projection_intermediate_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = TFGroupViTTextTransformer(text_config, name='text_model') self.vision_model = TFGroupViTVisionTransformer(vision_config, name='vision_model') self.visual_projection = [keras.layers.Dense(self.projection_intermediate_dim, name='visual_projection.0'), keras.layers.BatchNormalization(name='visual_projection.1', momentum=0.9, epsilon=1e-05), keras.layers.ReLU(name='visual_projection.2'), keras.layers.Dense(self.projection_dim, name='visual_projection.3')] self.text_projection = [keras.layers.Dense(self.projection_intermediate_dim, name='text_projection.0'), keras.layers.BatchNormalization(name='text_projection.1', momentum=0.9, epsilon=1e-05), 
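# The visual/text projection heads built above are two-layer MLPs
# (Dense -> BatchNorm -> ReLU -> Dense) kept as plain Python lists so that the
# sub-layer names match the original checkpoint keys ('visual_projection.0' ... '.3').
# A rough standalone equivalent; the widths below are illustrative placeholders for
# `hidden_size`, `projection_intermediate_dim` and `projection_dim` from the config.
import tensorflow as tf
from tensorflow import keras

hidden_size = 256                   # pooled feature width (example value)
projection_intermediate_dim = 4096  # example value
projection_dim = 256                # example value

projection_head = keras.Sequential([
    keras.layers.Dense(projection_intermediate_dim),
    keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5),
    keras.layers.ReLU(),
    keras.layers.Dense(projection_dim),
])

pooled = tf.random.normal((2, hidden_size))            # (batch, hidden)
print(projection_head(pooled, training=False).shape)   # (2, projection_dim)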
keras.layers.ReLU(name='text_projection.2'), keras.layers.Dense(self.projection_dim, name='text_projection.3')] def build(self, input_shape=None): self.logit_scale = self.add_weight(shape=(1,), initializer=keras.initializers.Constant(self.config.logit_scale_init_value), trainable=True, name='logit_scale') if self.built: return self.built = True if getattr(self, 'text_model', None) is not None: with tf.name_scope(self.text_model.name): self.text_model.build(None) if getattr(self, 'vision_model', None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, 'visual_projection', None) is not None: with tf.name_scope(self.visual_projection[0].name): self.visual_projection[0].build([None, None, None, self.vision_embed_dim]) with tf.name_scope(self.visual_projection[1].name): self.visual_projection[1].build((None, self.projection_intermediate_dim)) with tf.name_scope(self.visual_projection[3].name): self.visual_projection[3].build([None, None, None, self.projection_intermediate_dim]) if getattr(self, 'text_projection', None) is not None: with tf.name_scope(self.text_projection[0].name): self.text_projection[0].build([None, None, None, self.text_embed_dim]) with tf.name_scope(self.text_projection[1].name): self.text_projection[1].build((None, self.projection_intermediate_dim)) with tf.name_scope(self.text_projection[3].name): self.text_projection[3].build([None, None, None, self.projection_intermediate_dim]) @unpack_inputs def get_text_features(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor: if input_ids is None: raise ValueError('You have to specify either input_ids') input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) pooled_output = text_outputs[1] for layer in self.text_projection: pooled_output = layer(pooled_output) text_features = pooled_output return text_features @unpack_inputs def get_image_features(self, pixel_values: TFModelInputType | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor: if pixel_values is None: raise ValueError('You have to specify pixel_values') vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) pooled_output = vision_outputs[1] for layer in self.visual_projection: pooled_output = layer(pooled_output) image_features = pooled_output return image_features @unpack_inputs def call(self, input_ids: TFModelInputType | None=None, pixel_values: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_segmentation: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]: if 
input_ids is None: raise ValueError('You have to specify either input_ids') if pixel_values is None: raise ValueError('You have to specify pixel_values') input_shape = shape_list(input_ids) if attention_mask is None: attention_mask = tf.fill(dims=input_shape, value=1) if output_segmentation: output_attentions = True vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) image_embeds = vision_outputs[1] for layer in self.visual_projection: image_embeds = layer(image_embeds) text_embeds = text_outputs[1] for layer in self.text_projection: text_embeds = layer(text_embeds) image_embeds = image_embeds / tf.norm(image_embeds, axis=-1, keepdims=True) text_embeds = text_embeds / tf.norm(text_embeds, axis=-1, keepdims=True) logit_scale = tf.math.exp(self.logit_scale) logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale logits_per_image = tf.transpose(logits_per_text) seg_logits = None if output_segmentation: image_group_embeds = vision_outputs[0] image_group_embeds = tf.reshape(image_group_embeds, shape=(-1, shape_list(image_group_embeds)[-1])) for layer in self.visual_projection: image_group_embeds = layer(image_group_embeds) if output_hidden_states: attentions = vision_outputs[3] else: attentions = vision_outputs[2] grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:]) image_group_embeds = image_group_embeds / tf.norm(tensor=image_group_embeds, ord='euclidean', axis=-1, keepdims=True) logits_per_image_group = tf.matmul(image_group_embeds, text_embeds, transpose_b=True) * logit_scale logits_per_image_group = tf.reshape(logits_per_image_group, shape=(image_embeds.shape[0], -1, text_embeds.shape[0])) logits_per_image_group = tf.transpose(logits_per_image_group, perm=(0, 2, 1)) flatten_grouping = tf.reshape(grouping, shape=(shape_list(grouping)[0], shape_list(grouping)[1], -1)) seg_logits = tf.matmul(logits_per_image_group, flatten_grouping) * logit_scale seg_logits = tf.reshape(seg_logits, shape=(seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3])) loss = None if return_loss: loss = groupvit_loss(logits_per_text)[None, ...] if not return_dict: if seg_logits is not None: output = (logits_per_image, logits_per_text, seg_logits, text_embeds, image_embeds, text_outputs, vision_outputs) else: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return (loss,) + output if loss is not None else output return TFGroupViTModelOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, segmentation_logits=seg_logits, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs) class TFGroupViTPreTrainedModel(TFPreTrainedModel): config_class = GroupViTConfig base_model_prefix = 'groupvit' GROUPVIT_START_DOCSTRING = '\n This model inherits from [`TFPreTrainedModel`]. 
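# Sketch of the contrastive-logit computation performed in `call` above:
# L2-normalize both embedding sets, then scale the cosine-similarity matrix by
# exp(logit_scale). The tensor sizes and the scale value 2.6592 are illustrative
# only (the real scale is the learned `logit_scale` weight added in `build`).
import tensorflow as tf

text_embeds = tf.math.l2_normalize(tf.random.normal((3, 4)), axis=-1)    # 3 captions
image_embeds = tf.math.l2_normalize(tf.random.normal((2, 4)), axis=-1)   # 2 images

logit_scale = tf.math.exp(tf.constant(2.6592))
logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale  # (3, 2)
logits_per_image = tf.transpose(logits_per_text)                                        # (2, 3)
print(logits_per_text.shape, logits_per_image.shape)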
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the\n first positional argument :\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`\n\n \n\n Args:\n config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' GROUPVIT_TEXT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False``):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n' GROUPVIT_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`CLIPImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False``):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n' GROUPVIT_INPUTS_DOCSTRING = '\n Args:\n input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`CLIPImageProcessor.__call__`] for details.\n attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n return_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. 
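# The docstrings above define the attention-mask convention: 1 = attend,
# 0 = padding. A tiny sketch of building such a mask from right-padded ids
# (the pad id 0 and the token ids here are only example values, not the model's
# actual vocabulary):
import tensorflow as tf

input_ids = tf.constant([[101, 2023, 2003, 102, 0, 0]])   # hypothetical ids, right-padded with 0
attention_mask = tf.cast(input_ids != 0, tf.int32)
print(attention_mask.numpy())                              # [[1 1 1 1 0 0]]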
This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False``):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n' class TFGroupViTTextModel(TFGroupViTPreTrainedModel): config_class = GroupViTTextConfig main_input_name = 'input_ids' def __init__(self, config: GroupViTTextConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.groupvit = TFGroupViTTextMainLayer(config, name='groupvit') @unpack_inputs @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTTextConfig) def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: outputs = self.groupvit(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'groupvit', None) is not None: with tf.name_scope(self.groupvit.name): self.groupvit.build(None) class TFGroupViTVisionModel(TFGroupViTPreTrainedModel): config_class = GroupViTVisionConfig main_input_name = 'pixel_values' def __init__(self, config: GroupViTVisionConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.groupvit = TFGroupViTVisionMainLayer(config, name='groupvit') @unpack_inputs @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTVisionConfig) def call(self, pixel_values: TFModelInputType | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: outputs = self.groupvit(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'groupvit', None) is not None: with tf.name_scope(self.groupvit.name): self.groupvit.build(None) @add_start_docstrings(GROUPVIT_START_DOCSTRING) class TFGroupViTModel(TFGroupViTPreTrainedModel): config_class = GroupViTConfig def __init__(self, config: GroupViTConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.groupvit = TFGroupViTMainLayer(config, name='groupvit') @unpack_inputs @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) def get_text_features(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, 
output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor: text_features = self.groupvit.get_text_features(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return text_features @unpack_inputs @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) def get_image_features(self, pixel_values: TFModelInputType | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor: image_features = self.groupvit.get_image_features(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return image_features @unpack_inputs @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @replace_return_docstrings(output_type=TFGroupViTModelOutput, config_class=GroupViTConfig) def call(self, input_ids: TFModelInputType | None=None, pixel_values: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_segmentation: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]: outputs = self.groupvit(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, return_loss=return_loss, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_segmentation=output_segmentation, return_dict=return_dict, training=training) return outputs def serving_output(self, output: TFGroupViTModelOutput) -> TFGroupViTModelOutput: return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'groupvit', None) is not None: with tf.name_scope(self.groupvit.name): self.groupvit.build(None) # File: transformers-main/src/transformers/models/herbert/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available _import_structure = {'tokenization_herbert': ['HerbertTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast'] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/herbert/tokenization_herbert.py import json import os import re import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} def get_pairs(word): pairs = 
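# End-to-end usage sketch for the TF GroupViT model defined above, assuming the
# public `nvidia/groupvit-gcc-yfcc` checkpoint and network access; it uses the
# keyword-argument input style described in GROUPVIT_START_DOCSTRING.
import requests
import tensorflow as tf
from PIL import Image
from transformers import AutoProcessor, TFGroupViTModel

model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True
)

outputs = model(**inputs)
probs = tf.nn.softmax(outputs.logits_per_image, axis=1)   # image-text similarity scores
print(probs.numpy())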
set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def replace_unicode_punct(text): text = text.replace(',', ',') text = re.sub('。\\s*', '. ', text) text = text.replace('、', ',') text = text.replace('”', '"') text = text.replace('“', '"') text = text.replace('∶', ':') text = text.replace(':', ':') text = text.replace('?', '?') text = text.replace('《', '"') text = text.replace('》', '"') text = text.replace(')', ')') text = text.replace('!', '!') text = text.replace('(', '(') text = text.replace(';', ';') text = text.replace('1', '1') text = text.replace('」', '"') text = text.replace('「', '"') text = text.replace('0', '0') text = text.replace('3', '3') text = text.replace('2', '2') text = text.replace('5', '5') text = text.replace('6', '6') text = text.replace('9', '9') text = text.replace('7', '7') text = text.replace('8', '8') text = text.replace('4', '4') text = re.sub('.\\s*', '. ', text) text = text.replace('~', '~') text = text.replace('’', "'") text = text.replace('…', '...') text = text.replace('━', '-') text = text.replace('〈', '<') text = text.replace('〉', '>') text = text.replace('【', '[') text = text.replace('】', ']') text = text.replace('%', '%') return text def remove_non_printing_char(text): output = [] for char in text: cat = unicodedata.category(char) if cat.startswith('C'): continue output.append(char) return ''.join(output) def whitespace_tokenize(text): text = text.strip() if not text: return [] tokens = text.split() return tokens class BasicTokenizer: def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) unicode_normalized_text = unicodedata.normalize('NFC', text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(' '.join(split_tokens)) return output_tokens def _run_strip_accents(self, text): text = unicodedata.normalize('NFD', text) output = [] for char in text: cat = unicodedata.category(char) if cat == 'Mn': continue output.append(char) return ''.join(output) def _run_split_on_punc(self, text, never_split=None): if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return [''.join(x) for x in output] def _tokenize_chinese_chars(self, text): output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(' ') output.append(char) output.append(' ') else: output.append(char) return 
''.join(output)

    def _is_chinese_char(self, cp):
        if (
            (cp >= 19968 and cp <= 40959)
            or (cp >= 13312 and cp <= 19903)
            or (cp >= 131072 and cp <= 173791)
            or (cp >= 173824 and cp <= 177983)
            or (cp >= 177984 and cp <= 178207)
            or (cp >= 178208 and cp <= 183983)
            or (cp >= 63744 and cp <= 64255)
            or (cp >= 194560 and cp <= 195103)
        ):
            return True
        return False

    def _clean_text(self, text):
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 65533 or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(' ')
            else:
                output.append(char)
        return ''.join(output)


class HerbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        merges_file,
        tokenizer_file=None,
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        sep_token='</s>',
        bos_token='<s>',
        do_lowercase_and_remove_accent=False,
        additional_special_tokens=[
            '<special0>',
            '<special1>',
            '<special2>',
            '<special3>',
            '<special4>',
            '<special5>',
            '<special6>',
            '<special7>',
            '<special8>',
            '<special9>',
        ],
        lang2id=None,
        id2lang=None,
        **kwargs,
    ):
        try:
            import sacremoses
        except ImportError:
            raise ImportError('You need to install sacremoses to use HerbertTokenizer. See https://pypi.org/project/sacremoses/ for installation.')
        self.sm = sacremoses
        self.cache_moses_punct_normalizer = {}
        self.cache_moses_tokenizer = {}
        self.lang_with_custom_tokenizer = {'zh', 'th', 'ja'}
        self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
        self.lang2id = lang2id
        self.id2lang = id2lang
        if lang2id is not None and id2lang is not None:
            assert len(lang2id) == len(id2lang)
        self.ja_word_tokenizer = None
        self.zh_word_tokenizer = None
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:-1]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            lang2id=lang2id,
            id2lang=id2lang,
            do_lowercase_and_remove_accent=do_lowercase_and_remove_accent,
            tokenizer_file=None,
            **kwargs,
        )
        self.bert_pre_tokenizer = BasicTokenizer(do_lower_case=False, never_split=self.all_special_tokens, tokenize_chinese_chars=False, strip_accents=False)

    @property
    def do_lower_case(self):
        return self.do_lowercase_and_remove_accent

    def moses_punct_norm(self, text, lang):
        if lang not in self.cache_moses_punct_normalizer:
            punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        else:
            punct_normalizer = self.cache_moses_punct_normalizer[lang]
        return punct_normalizer.normalize(text)

    def moses_tokenize(self, text, lang):
        if lang not in self.cache_moses_tokenizer:
            moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        else:
            moses_tokenizer = self.cache_moses_tokenizer[lang]
        return moses_tokenizer.tokenize(text, return_str=False, escape=False)

    def moses_pipeline(self, text, lang):
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    def ja_tokenize(self, text):
        if self.ja_word_tokenizer is None:
            try:
                import Mykytea

                self.ja_word_tokenizer = Mykytea.Mykytea(f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin")
            except (AttributeError, ImportError):
                logger.error("Make sure you install KyTea (https://github.com/neubig/kytea) and its python wrapper (https://github.com/chezou/Mykytea-python) with the following steps")
                logger.error('1. git clone git@github.com:neubig/kytea.git && cd kytea')
                logger.error('2. autoreconf -i')
                logger.error('3. ./configure --prefix=$HOME/local')
                logger.error('4. make && make install')
                logger.error('5. pip install kytea')
                raise
        return list(self.ja_word_tokenizer.getWS(text))

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token + '</w>'
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if word == '\n </w>':
            word = '\n</w>'
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        pre_tokens = self.bert_pre_tokenizer.tokenize(text)
        split_tokens = []
        for token in pre_tokens:
            if token:
                split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        bos = [self.bos_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return bos + token_ids_0 + sep
        return bos + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
        return [1] + [0] * len(token_ids_0) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with
open(merge_file, 'w', encoding='utf-8') as writer: for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file) def __getstate__(self): state = self.__dict__.copy() state['sm'] = None return state def __setstate__(self, d): self.__dict__ = d try: import sacremoses except ImportError: raise ImportError('You need to install sacremoses to use XLMTokenizer. See https://pypi.org/project/sacremoses/ for installation.') self.sm = sacremoses # File: transformers-main/src/transformers/models/herbert/tokenization_herbert_fast.py from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} class HerbertTokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = HerbertTokenizer def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token='', unk_token='', pad_token='', mask_token='', sep_token='', **kwargs): super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: cls = [self.cls_token_id] sep = [self.sep_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1] def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) # File: transformers-main/src/transformers/models/hiera/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_hiera': ['HieraConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_hiera'] = ['HieraForImageClassification', 'HieraForPreTraining', 'HieraBackbone', 'HieraModel', 'HieraPreTrainedModel'] if TYPE_CHECKING: from .configuration_hiera import HieraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except 
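# Usage sketch for HerbertTokenizerFast, assuming the public
# `allegro/herbert-base-cased` checkpoint and network access; the Polish
# sentence is an arbitrary example input.
from transformers import HerbertTokenizerFast

tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
encoded = tokenizer("Kto czyta książki, żyje podwójnie.")
print(encoded["input_ids"])
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))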
OptionalDependencyNotAvailable: pass else: from .modeling_hiera import HieraBackbone, HieraForImageClassification, HieraForPreTraining, HieraModel, HieraPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/hiera/configuration_hiera.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) class HieraConfig(BackboneConfigMixin, PretrainedConfig): model_type = 'hiera' attribute_map = {'num_hidden_layers': 'num_layers'} def __init__(self, embed_dim=96, image_size=[224, 224], patch_size=[7, 7], patch_stride=[4, 4], patch_padding=[3, 3], mlp_ratio=4.0, depths=[2, 3, 16, 3], num_heads=[1, 2, 4, 8], embed_dim_multiplier=2.0, num_query_pool=3, query_stride=[2, 2], masked_unit_size=[8, 8], masked_unit_attention=[True, True, False, False], drop_path_rate=0.0, num_channels=3, hidden_act='gelu', initializer_range=0.02, layer_norm_init=1.0, layer_norm_eps=1e-06, decoder_hidden_size=None, decoder_depth=None, decoder_num_heads=None, normalize_pixel_loss=True, mask_ratio=0.6, out_features=None, out_indices=None, **kwargs): super().__init__(**kwargs) if masked_unit_size[0] % query_stride[0] ** (len(depths) - 1) != 0: raise ValueError(f'masked_unit_size[0] ({masked_unit_size[0]}) must be divisible by query_stride[0] ({query_stride[0]}) raised to the power of the number of layers ({len(depths) - 1})') if num_query_pool >= len(depths): raise ValueError(f'num_query_pool ({num_query_pool}) must be less than the number of layers ({len(depths)})') self.embed_dim = embed_dim self.image_size = image_size self.patch_size = patch_size self.patch_stride = patch_stride self.patch_padding = patch_padding self.mlp_ratio = mlp_ratio self.depths = depths self.num_heads = num_heads self.num_layers = len(depths) self.embed_dim_multiplier = embed_dim_multiplier self.num_query_pool = num_query_pool self.query_stride = query_stride self.masked_unit_size = masked_unit_size self.masked_unit_attention = masked_unit_attention self.drop_path_rate = drop_path_rate self.num_channels = num_channels self.hidden_act = hidden_act self.initializer_range = initializer_range self.layer_norm_init = layer_norm_init self.layer_norm_eps = layer_norm_eps self.decoder_hidden_size = decoder_hidden_size self.decoder_depth = decoder_depth self.decoder_num_heads = decoder_num_heads self.normalize_pixel_loss = normalize_pixel_loss self.mask_ratio = mask_ratio self.hidden_size = int(embed_dim * embed_dim_multiplier ** (len(depths) - 1)) self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)] (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names) # File: transformers-main/src/transformers/models/hiera/convert_hiera_to_hf.py """""" import argparse import json import math from typing import Dict, Tuple import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, HieraConfig, HieraForImageClassification, HieraForPreTraining, HieraModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from transformers.utils import logging logging.set_verbosity_info() logger = 
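# Quick sketch of HieraConfig defaults and the attributes derived in __init__ above
# (the default hyper-parameters correspond to the hiera-base-224 variant used in the
# conversion script below):
from transformers import HieraConfig

config = HieraConfig()
print(config.num_layers)    # 4, i.e. len(depths)
print(config.hidden_size)   # int(96 * 2.0 ** 3) = 768
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']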
logging.get_logger(__name__) def create_rename_keys(config: HieraConfig, base_model: bool, mae_model: bool): rename_keys = [] num_stages = len(config.depths) dims = [config.embed_dim] + [int(config.embed_dim * config.embed_dim_multiplier ** i) for i in range(num_stages)] global_layer_idx = 0 for stage_idx in range(num_stages): dim_in = dims[stage_idx] dim_out = dims[stage_idx + 1] for layer_idx in range(config.depths[stage_idx]): rename_keys.append((f'blocks.{global_layer_idx}.norm1.weight', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_before.weight')) rename_keys.append((f'blocks.{global_layer_idx}.norm1.bias', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_before.bias')) rename_keys.append((f'blocks.{global_layer_idx}.attn.qkv.weight', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.qkv.weight')) rename_keys.append((f'blocks.{global_layer_idx}.attn.qkv.bias', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.qkv.bias')) rename_keys.append((f'blocks.{global_layer_idx}.attn.proj.weight', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.proj.weight')) rename_keys.append((f'blocks.{global_layer_idx}.attn.proj.bias', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.proj.bias')) rename_keys.append((f'blocks.{global_layer_idx}.norm2.weight', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_after.weight')) rename_keys.append((f'blocks.{global_layer_idx}.norm2.bias', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_after.bias')) rename_keys.append((f'blocks.{global_layer_idx}.mlp.fc1.weight', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc1.weight')) rename_keys.append((f'blocks.{global_layer_idx}.mlp.fc1.bias', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc1.bias')) rename_keys.append((f'blocks.{global_layer_idx}.mlp.fc2.weight', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc2.weight')) rename_keys.append((f'blocks.{global_layer_idx}.mlp.fc2.bias', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc2.bias')) if dim_out != dim_in and layer_idx == 0: rename_keys.append((f'blocks.{global_layer_idx}.proj.weight', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.proj.weight')) rename_keys.append((f'blocks.{global_layer_idx}.proj.bias', f'hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.proj.bias')) global_layer_idx += 1 rename_keys.extend([('patch_embed.proj.weight', 'hiera.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'hiera.embeddings.patch_embeddings.projection.bias')]) rename_keys.append(('pos_embed', 'hiera.embeddings.position_embeddings')) if base_model: rename_keys.extend([('norm.weight', 'pooler.layernorm.weight'), ('norm.bias', 'pooler.layernorm.bias')]) rename_keys = [(pair[0], pair[1][6:]) if pair[1].startswith('hiera') else pair for pair in rename_keys] elif mae_model: rename_keys.extend([('encoder_norm.weight', 'encoder_norm.weight'), ('encoder_norm.bias', 'encoder_norm.bias'), ('mask_token', 'decoder.mask_token'), ('decoder_pos_embed', 'decoder.decoder_position_embeddings'), ('decoder_norm.weight', 'decoder.decoder_norm.weight'), ('decoder_norm.bias', 'decoder.decoder_norm.bias'), ('decoder_pred.weight', 'decoder.decoder_pred.weight'), ('decoder_pred.bias', 'decoder.decoder_pred.bias'), ('decoder_embed.weight', 'decoder.decoder_embeddings.weight'), ('decoder_embed.bias', 'decoder.decoder_embeddings.bias')]) for i in range(config.decoder_depth): 
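# The checkpoint conversion above is a pure key-remapping: every (old, new) pair
# produced by `create_rename_keys` is applied by popping the old key and
# re-inserting the tensor under the new key (see `rename_key` below). A toy
# illustration with scalar placeholders standing in for real weight tensors:
toy_state_dict = {"blocks.0.norm1.weight": 1.0, "patch_embed.proj.bias": 2.0}
toy_renames = [
    ("blocks.0.norm1.weight", "hiera.encoder.stages.0.layers.0.layernorm_before.weight"),
    ("patch_embed.proj.bias", "hiera.embeddings.patch_embeddings.projection.bias"),
]
for old, new in toy_renames:
    toy_state_dict[new] = toy_state_dict.pop(old)
print(sorted(toy_state_dict))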
rename_keys.extend([(f'decoder_blocks.{i}.norm1.weight', f'decoder.decoder_block.layers.{i}.layernorm_before.weight'), (f'decoder_blocks.{i}.norm1.bias', f'decoder.decoder_block.layers.{i}.layernorm_before.bias'), (f'decoder_blocks.{i}.attn.qkv.weight', f'decoder.decoder_block.layers.{i}.attn.qkv.weight'), (f'decoder_blocks.{i}.attn.qkv.bias', f'decoder.decoder_block.layers.{i}.attn.qkv.bias'), (f'decoder_blocks.{i}.attn.proj.weight', f'decoder.decoder_block.layers.{i}.attn.proj.weight'), (f'decoder_blocks.{i}.attn.proj.bias', f'decoder.decoder_block.layers.{i}.attn.proj.bias'), (f'decoder_blocks.{i}.norm2.weight', f'decoder.decoder_block.layers.{i}.layernorm_after.weight'), (f'decoder_blocks.{i}.norm2.bias', f'decoder.decoder_block.layers.{i}.layernorm_after.bias'), (f'decoder_blocks.{i}.mlp.fc1.weight', f'decoder.decoder_block.layers.{i}.mlp.fc1.weight'), (f'decoder_blocks.{i}.mlp.fc1.bias', f'decoder.decoder_block.layers.{i}.mlp.fc1.bias'), (f'decoder_blocks.{i}.mlp.fc2.weight', f'decoder.decoder_block.layers.{i}.mlp.fc2.weight'), (f'decoder_blocks.{i}.mlp.fc2.bias', f'decoder.decoder_block.layers.{i}.mlp.fc2.bias')]) for i in range(config.num_query_pool): rename_keys.extend([(f'multi_scale_fusion_heads.{i}.weight', f'multiscale_fusion.multi_scale_fusion_heads.{i}.weight'), (f'multi_scale_fusion_heads.{i}.bias', f'multiscale_fusion.multi_scale_fusion_heads.{i}.bias')]) else: rename_keys.extend([('norm.weight', 'hiera.pooler.layernorm.weight'), ('norm.bias', 'hiera.pooler.layernorm.bias'), ('head.projection.weight', 'classifier.weight'), ('head.projection.bias', 'classifier.bias')]) return rename_keys def remove_classification_head_(state_dict): ignore_keys = ['head.projection.weight', 'head.projection.bias'] for k in ignore_keys: state_dict.pop(k, None) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def prepare_img(): url = 'http://images.cocodataset.org/val2017/000000039769.jpg' im = Image.open(requests.get(url, stream=True).raw) return im def get_labels_for_classifier(model_name: str) -> Tuple[Dict[int, str], Dict[str, int], int]: repo_id = 'huggingface/label-files' filename = 'imagenet-1k-id2label.json' id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r')) id2label = {int(k): v for (k, v) in id2label.items()} label2id = {v: k for (k, v) in id2label.items()} num_labels = len(id2label) return (id2label, label2id, num_labels) def get_hiera_config(model_name: str, base_model: bool, mae_model: bool) -> HieraConfig: if model_name == 'hiera-tiny-224': config = HieraConfig(depths=[1, 2, 7, 2]) elif model_name == 'hiera-small-224': config = HieraConfig(depths=[1, 2, 11, 2]) elif model_name == 'hiera-base-224': config = HieraConfig() elif model_name == 'hiera-base-plus-224': config = HieraConfig(embed_dim=112, num_heads=[2, 4, 8, 16]) elif model_name == 'hiera-large-224': config = HieraConfig(embed_dim=144, num_heads=[2, 4, 8, 16], depths=[2, 6, 36, 4]) elif model_name == 'hiera-huge-224': config = HieraConfig(embed_dim=256, num_heads=[4, 8, 16, 32], depths=[2, 6, 36, 4]) else: raise ValueError(f'Unrecognized model name: {model_name}') if base_model: pass elif mae_model: config.num_query_pool = 2 config.decoder_hidden_size = 512 config.decoder_depth = 8 config.decoder_num_heads = 16 config.mask_ratio = 0.6 else: (id2label, label2id, num_labels) = get_labels_for_classifier(model_name) config.id2label = id2label config.label2id = label2id config.num_labels = num_labels return config @torch.no_grad() def convert_hiera_checkpoint(args): 
model_name = args.model_name base_model = args.base_model pytorch_dump_folder_path = args.pytorch_dump_folder_path push_to_hub = args.push_to_hub mae_model = args.mae_model config = get_hiera_config(model_name, base_model, mae_model) original_model_name = model_name.replace('-', '_') original_model_name = f'mae_{original_model_name}' if mae_model else original_model_name original_checkpoint_name = 'mae_in1k_ft_in1k' if not (base_model or mae_model) else 'mae_in1k' original_model = torch.hub.load('facebookresearch/hiera', model=original_model_name, pretrained=True, checkpoint=original_checkpoint_name) original_model.eval() original_state_dict = original_model.state_dict() if base_model: remove_classification_head_(original_state_dict) new_state_dict = original_state_dict.copy() rename_keys = create_rename_keys(config, base_model, mae_model) for (src, dest) in rename_keys: rename_key(new_state_dict, src, dest) if base_model: model = HieraModel(config) elif mae_model: model = HieraForPreTraining(config) else: model = HieraForImageClassification(config) model.eval() (missing_keys, unexpected_keys) = model.load_state_dict(new_state_dict, strict=False) print('Missing keys:', missing_keys) print('Unexpected keys:', unexpected_keys) input_image = prepare_img() original_image_preprocessor = transforms.Compose([transforms.Resize(int(256 / 224 * 224), interpolation=transforms.functional.InterpolationMode.BICUBIC), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)]) image_processor = BitImageProcessor(image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, size={'shortest_edge': 256}) inputs = image_processor(images=input_image, return_tensors='pt') expected_pixel_values = original_image_preprocessor(input_image).unsqueeze(0) input_image = prepare_img() inputs = image_processor(images=input_image, return_tensors='pt') expected_pixel_values = original_image_preprocessor(input_image).unsqueeze(0) assert torch.allclose(inputs.pixel_values, expected_pixel_values, atol=0.0001) print('Pixel values look good!') print(f'inputs.pixel_values[0, :3, :3, :3]={inputs.pixel_values[0, :3, :3, :3]!r}') mask_spatial_shape = [i // s // ms for (i, s, ms) in zip(config.image_size, config.patch_stride, config.masked_unit_size)] num_windows = math.prod(mask_spatial_shape) torch.manual_seed(2) noise = torch.rand(1, num_windows) outputs = model(**inputs) if not mae_model else model(noise=noise, **inputs) if base_model: (expected_prob, expected_intermediates) = original_model(expected_pixel_values, return_intermediates=True) expected_last_hidden = expected_intermediates[-1] (batch_size, _, _, hidden_dim) = expected_last_hidden.shape expected_last_hidden = expected_last_hidden.reshape(batch_size, -1, hidden_dim) assert torch.allclose(outputs.last_hidden_state, expected_last_hidden, atol=0.001) print('Base Model looks good as hidden states match original implementation!') print(f'outputs.last_hidden_state[0, :3, :3]={outputs.last_hidden_state[0, :3, :3]!r}') elif mae_model: (mask, _) = model.hiera.embeddings.patch_embeddings.random_masking(expected_pixel_values, noise) (expected_loss, _, _, _) = original_model(expected_pixel_values, mask=mask.bool()) assert torch.allclose(outputs.loss, expected_loss, atol=0.001) print('MAE Model looks good as loss matches original implementation!') else: expected_prob = original_model(expected_pixel_values) assert torch.allclose(outputs.logits.softmax(dim=-1), expected_prob, atol=0.001) print('Classifier looks good as 
probs match original implementation') print(f'outputs.logits[:, :5]={outputs.logits[:, :5]!r}') if pytorch_dump_folder_path is not None: print(f'Saving model and processor for {model_name} to {pytorch_dump_folder_path}') model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: hub_name = model_name if base_model: hub_name = model_name elif mae_model: hub_name = f'{model_name}-mae' else: hub_name = f'{model_name}-in1k' repo_id = f'EduardoPacheco/{hub_name}' print(f'Pushing model and processor for {model_name} to hub at {repo_id}') model.push_to_hub(repo_id) image_processor.push_to_hub(repo_id) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--model-name', default='hiera-tiny-224', type=str, choices=['hiera-tiny-224', 'hiera-small-224', 'hiera-base-224', 'hiera-base-plus-224', 'hiera-large-224', 'hiera-huge-224'], help="Name of the Hiera model you'd like to convert.") parser.add_argument('--pytorch-dump-folder_path', default=None, type=str, help='Path to the output PyTorch model directory.') parser.add_argument('--verify-logits', action='store_true', help='Whether or not to verify the logits against the original implementation.') parser.add_argument('--push-to-hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.') parser.add_argument('--base-model', action='store_true', help='Whether to only convert the base model (no projection head weights).') parser.add_argument('--mae-model', action='store_true', help='Whether to convert to MAE checkpoint to HieraForPreTraining.') args = parser.parse_args() convert_hiera_checkpoint(args) # File: transformers-main/src/transformers/models/hiera/modeling_hiera.py """""" import math from dataclasses import dataclass from typing import Dict, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, ModelOutput from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int from ...utils.backbone_utils import BackboneMixin from .configuration_hiera import HieraConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'HieraConfig' _CHECKPOINT_FOR_DOC = 'facebook/hiera-tiny-224-hf' _EXPECTED_OUTPUT_SHAPE = [1, 49, 768] _IMAGE_CLASS_CHECKPOINT = 'facebook/hiera-tiny-224-in1k-hf' _IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat' @dataclass class HieraEncoderOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class HieraModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None pooler_output: Optional[torch.FloatTensor] = None bool_masked_pos: torch.BoolTensor = None ids_restore: torch.LongTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class HieraForImageClassificationOutput(ImageClassifierOutput): loss: Optional[torch.FloatTensor] = None logits: 
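# Usage sketch for a converted checkpoint, assuming the public
# `facebook/hiera-tiny-224-in1k-hf` repo referenced by _IMAGE_CLASS_CHECKPOINT above
# (it ships the image processor saved by the conversion script) and network access:
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, HieraForImageClassification

model = HieraForImageClassification.from_pretrained("facebook/hiera-tiny-224-in1k-hf")
processor = AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-in1k-hf")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])   # expected: 'tabby, tabby cat'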
torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class HieraForPreTrainingOutput(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None bool_masked_pos: torch.BoolTensor = None ids_restore: torch.LongTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None class HieraPatchEmbeddings(nn.Module): def __init__(self, config, is_mae: bool=False): super().__init__() self.spatial_dims = len(config.patch_size) if self.spatial_dims != 2: raise ValueError(f'The number of dimensions of the input image should be 2, but got {self.spatial_dims}.') self.num_channels = config.num_channels self.image_size = config.image_size[-2:] self.tokens_spatial_shape = [i // s for (i, s) in zip(config.image_size, config.patch_stride)] self.mask_spatial_shape = [i // s for (i, s) in zip(self.tokens_spatial_shape, config.masked_unit_size)] self.mask_ratio = config.mask_ratio self.is_mae = is_mae self.projection = nn.Conv2d(self.num_channels, config.embed_dim, kernel_size=config.patch_size, stride=config.patch_stride, padding=config.patch_padding) def masked_conv(self, pixel_values: torch.FloatTensor, bool_masked_pos: Optional[torch.BoolTensor]=None) -> torch.Tensor: if bool_masked_pos is None: return self.projection(pixel_values) target_size = pixel_values.shape[2:] bool_masked_pos = bool_masked_pos.view(pixel_values.shape[0], 1, *self.mask_spatial_shape) bool_masked_pos = nn.functional.interpolate(bool_masked_pos.float(), size=target_size) return self.projection(pixel_values * bool_masked_pos) def random_masking(self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor]=None) -> Tuple[torch.BoolTensor, torch.LongTensor]: batch_size = pixel_values.shape[0] num_windows = math.prod(self.mask_spatial_shape) len_keep = int(num_windows * (1 - self.mask_ratio)) if noise is None: noise = torch.rand(batch_size, num_windows, device=pixel_values.device) ids_shuffle = torch.argsort(noise, dim=1) ids_restore = torch.argsort(ids_shuffle, dim=1).to(pixel_values.device) bool_masked_pos = torch.zeros([batch_size, num_windows], device=pixel_values.device) bool_masked_pos[:, :len_keep] = 1 bool_masked_pos = torch.gather(bool_masked_pos, dim=1, index=ids_restore).bool() return (bool_masked_pos, ids_restore) def forward(self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor]=None) -> Tuple[torch.Tensor, Optional[torch.BoolTensor], Optional[torch.LongTensor]]: (bool_masked_pos, ids_restore) = self.random_masking(pixel_values, noise=noise) if self.is_mae else (None, None) embeddings = self.masked_conv(pixel_values, bool_masked_pos) embeddings = embeddings.flatten(2).transpose(2, 1) return (embeddings, bool_masked_pos, ids_restore) class HieraEmbeddings(nn.Module): def __init__(self, config: HieraConfig, is_mae: bool=False) -> None: super().__init__() self.patch_stride = config.patch_stride tokens_spatial_shape = [i // s for (i, s) in zip(config.image_size, config.patch_stride)] self.mask_spatial_shape = [i // s for (i, s) in zip(tokens_spatial_shape, config.masked_unit_size)] self.num_tokens = math.prod(tokens_spatial_shape) self.is_mae = is_mae self.patch_embeddings = HieraPatchEmbeddings(config, is_mae=is_mae) self.position_embeddings = nn.Parameter(torch.zeros(1, 
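# Standalone re-implementation (for illustration) of the MAE-style masking used in
# HieraPatchEmbeddings.random_masking above: sort random noise, keep the
# `len_keep` lowest-noise mask units, and recover per-unit keep flags with the
# inverse permutation. `random_window_mask` is a hypothetical helper name.
import torch

def random_window_mask(batch_size: int, num_windows: int, mask_ratio: float, noise=None):
    len_keep = int(num_windows * (1 - mask_ratio))
    if noise is None:
        noise = torch.rand(batch_size, num_windows)
    ids_shuffle = torch.argsort(noise, dim=1)          # ascending: lowest noise is kept
    ids_restore = torch.argsort(ids_shuffle, dim=1)    # inverse permutation
    mask = torch.zeros(batch_size, num_windows)
    mask[:, :len_keep] = 1
    bool_masked_pos = torch.gather(mask, dim=1, index=ids_restore).bool()  # True = kept unit
    return bool_masked_pos, ids_restore

torch.manual_seed(0)
keep, _ = random_window_mask(1, 8, mask_ratio=0.5)
print(keep.int())   # exactly 4 of the 8 mask units are kept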
self.num_tokens, config.embed_dim)) def interpolate_pos_encoding(self, embeddings: torch.Tensor, pos_embeds: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] num_positions = pos_embeds.shape[1] if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return pos_embeds dim = embeddings.shape[-1] new_height = height // self.patch_stride[0] new_width = width // self.patch_stride[1] sqrt_num_positions = torch_int(num_positions ** 0.5) pos_embeds = pos_embeds.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) pos_embeds = pos_embeds.permute(0, 3, 1, 2) pos_embeds = nn.functional.interpolate(pos_embeds, size=(new_height, new_width), mode='bicubic', align_corners=False) pos_embeds = pos_embeds.permute(0, 2, 3, 1).view(1, -1, dim) return pos_embeds def get_position_embedding(self, embeddings: torch.Tensor, height: int, width: int, interpolate_pos_encoding: bool) -> torch.FloatTensor: return self.interpolate_pos_encoding(embeddings, self.position_embeddings, height, width) if interpolate_pos_encoding else self.position_embeddings def forward(self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False) -> Tuple[torch.Tensor, Optional[torch.BoolTensor], Optional[torch.LongTensor]]: (height, width) = pixel_values.shape[-2:] (embeddings, bool_masked_pos, ids_restore) = self.patch_embeddings(pixel_values, noise=noise) embeddings = embeddings + self.get_position_embedding(embeddings, height, width, interpolate_pos_encoding) return (embeddings, bool_masked_pos, ids_restore) class HieraMaskUnitAttention(nn.Module): def __init__(self, hidden_size: int, hidden_size_output: int, num_heads: int, query_stride: int=1, window_size: int=0, use_mask_unit_attn: bool=False) -> None: super().__init__() self.num_heads = num_heads self.query_stride = query_stride self.hidden_size_output = hidden_size_output self.head_dim = hidden_size_output // num_heads self.scale = self.head_dim ** (-0.5) self.qkv = nn.Linear(hidden_size, 3 * hidden_size_output) self.proj = nn.Linear(hidden_size_output, hidden_size_output) self.window_size = window_size self.use_mask_unit_attn = use_mask_unit_attn def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: (batch_size, seq_len, _) = hidden_states.shape num_windows = 1 if self.use_mask_unit_attn: num_windows = seq_len // (self.query_stride * self.window_size) qkv = self.qkv(hidden_states) qkv = qkv.reshape(batch_size, -1, num_windows, 3, self.num_heads, self.head_dim) qkv = qkv.permute(3, 0, 4, 2, 1, 5) (query, key, value) = qkv.unbind(0) if self.query_stride > 1: query = query.view(batch_size, self.num_heads, num_windows, self.query_stride, -1, self.head_dim) query = query.max(dim=3).values attn_weights = query * self.scale @ key.transpose(-1, -2) attn_weights = attn_weights.softmax(dim=-1) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = attn_weights @ value attn_output = attn_output.transpose(1, 3).reshape(batch_size, -1, self.hidden_size_output) attn_output = self.proj(attn_output) return (attn_output, attn_weights) if output_attentions else (attn_output, None) def drop_path(input: torch.Tensor, drop_prob: float=0.0, training: bool=False) -> torch.Tensor: if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) random_tensor = keep_prob + 
torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() output = input.div(keep_prob) * random_tensor return output class HieraDropPath(nn.Module): def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class HieraMlp(nn.Module): def __init__(self, config, dim: int) -> None: super().__init__() self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(dim, int(dim * config.mlp_ratio)) self.fc2 = nn.Linear(int(dim * config.mlp_ratio), dim) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class HieraLayer(nn.Module): def __init__(self, config, hidden_size: int, hidden_size_output: int, num_heads: int, drop_path: float=0.0, query_stride: int=1, window_size: int=0, use_mask_unit_attn: bool=False) -> None: super().__init__() self.hidden_size = hidden_size self.hidden_size_output = hidden_size_output self.query_stride = query_stride self.layernorm_before = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.attn = HieraMaskUnitAttention(hidden_size=hidden_size, hidden_size_output=hidden_size_output, num_heads=num_heads, query_stride=query_stride, window_size=window_size, use_mask_unit_attn=use_mask_unit_attn) self.layernorm_after = nn.LayerNorm(hidden_size_output, eps=config.layer_norm_eps) self.mlp = HieraMlp(config, hidden_size_output) self.drop_path = HieraDropPath(drop_path) if drop_path > 0 else nn.Identity() if hidden_size != hidden_size_output: self.proj = nn.Linear(hidden_size, hidden_size_output) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: (batch_size, seq_len, _) = hidden_states.shape hidden_states_norm = self.layernorm_before(hidden_states) if self.hidden_size != self.hidden_size_output: hidden_states = self.proj(hidden_states_norm) hidden_states = hidden_states.view(batch_size, self.query_stride, -1, self.hidden_size_output).max(dim=1).values (hidden_states_norm, attn_weights) = self.attn(hidden_states_norm, head_mask, output_attentions=output_attentions) hidden_states = hidden_states + self.drop_path(hidden_states_norm) residual = hidden_states hidden_states = self.layernorm_after(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + self.drop_path(hidden_states) return (hidden_states, attn_weights) class HieraStage(nn.Module): def __init__(self, config, depth: int, hidden_size: int, hidden_size_output: int, num_heads: int, drop_path: List[float], query_stride: List[int], window_size: int, use_mask_unit_attn: bool, stage_num: Optional[int]=None) -> None: super().__init__() previous_stage_used_masked_attention = False if stage_num is not None: previous_stage_used_masked_attention = config.masked_unit_attention[stage_num - 1 if stage_num > 0 else 0] self.layers = nn.ModuleList([HieraLayer(config=config, hidden_size=hidden_size if i == 0 else hidden_size_output, hidden_size_output=hidden_size_output, num_heads=num_heads, drop_path=drop_path[i], query_stride=query_stride[i], window_size=window_size, use_mask_unit_attn=use_mask_unit_attn or (previous_stage_used_masked_attention and i == 0)) 
for i in range(depth)]) def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor], output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: for (i, layer_module) in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None (hidden_states, attn_weights) = layer_module(hidden_states, layer_head_mask, output_attentions=output_attentions) return (hidden_states, attn_weights) def undo_windowing(hidden_states: torch.Tensor, shape: List[int], mask_unit_shape: List[int]) -> torch.Tensor: (batch_size, hidden_size) = (hidden_states.shape[0], hidden_states.shape[-1]) num_mask_units = [s // mu for (s, mu) in zip(shape, mask_unit_shape)] hidden_states = hidden_states.view(batch_size, *num_mask_units, *mask_unit_shape, hidden_size) hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5) hidden_states = hidden_states.reshape(batch_size, *shape, hidden_size) return hidden_states class HieraEncoder(nn.Module): def __init__(self, config: HieraConfig) -> None: super().__init__() total_depth = sum(config.depths) dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, total_depth)] cumulative_depths = torch.tensor(config.depths).cumsum(0).tolist() query_pool_layer = cumulative_depths[:config.num_query_pool] query_strides = [math.prod(config.query_stride) if i in query_pool_layer else 1 for i in range(total_depth)] self.stages = nn.ModuleList() hidden_size = config.embed_dim stage_ends = [0] + cumulative_depths masked_unit_area = math.prod(config.masked_unit_size) query_stride_area = math.prod(config.query_stride) for (idx_stage, depth) in enumerate(config.depths): hidden_size_output = int(config.embed_dim * config.embed_dim_multiplier ** idx_stage) stage = HieraStage(config=config, depth=depth, hidden_size=hidden_size, hidden_size_output=hidden_size_output, num_heads=config.num_heads[idx_stage], drop_path=dpr[stage_ends[idx_stage]:stage_ends[idx_stage + 1]], query_stride=query_strides[stage_ends[idx_stage]:stage_ends[idx_stage + 1]], window_size=int(masked_unit_area * query_stride_area ** (-idx_stage)), use_mask_unit_attn=config.masked_unit_attention[idx_stage], stage_num=idx_stage) hidden_size = hidden_size_output self.stages.append(stage) stage_size = [i // s for (i, s) in zip(config.image_size, config.patch_stride)] unroll_schedule = [config.query_stride] * len(config.depths[:-1]) self.schedule = {} for idx_stage in range(len(config.depths)): self.schedule[idx_stage] = (unroll_schedule, stage_size) if idx_stage < config.num_query_pool: stage_size = [i // s for (i, s) in zip(stage_size, config.query_stride)] unroll_schedule = unroll_schedule[1:] self.gradient_checkpointing = False def reroll(self, hidden_states: torch.Tensor, stage_idx: int, bool_masked_pos: Optional[torch.BoolTensor]=None) -> torch.Tensor: (schedule, size) = self.schedule[stage_idx] (batch_size, seq_len, hidden_size) = hidden_states.shape num_dim = len(size) mask_unit_shape = [1] * num_dim for strides in schedule: hidden_states = hidden_states.view(batch_size, *strides, seq_len // math.prod(strides), *mask_unit_shape, hidden_size) hidden_states = hidden_states.permute(0, 3, 1, 4, 2, 5, 6) for i in range(num_dim): mask_unit_shape[i] *= strides[i] hidden_states = hidden_states.reshape(batch_size, -1, *mask_unit_shape, hidden_size) seq_len = hidden_states.shape[1] hidden_states = hidden_states.view(batch_size, seq_len, *mask_unit_shape, hidden_size) if bool_masked_pos is not None: return hidden_states hidden_states = undo_windowing(hidden_states, size, 
mask_unit_shape) return hidden_states def forward(self, hidden_states: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) reshaped_hidden_states = self.reroll(hidden_states, stage_idx=0, bool_masked_pos=bool_masked_pos) all_reshaped_hidden_states = all_reshaped_hidden_states + (reshaped_hidden_states,) for (i, stage_module) in enumerate(self.stages): layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(stage_module.__call__, hidden_states, layer_head_mask, output_attentions) else: layer_outputs = stage_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) reshaped_hidden_states = self.reroll(hidden_states, stage_idx=i, bool_masked_pos=bool_masked_pos) all_reshaped_hidden_states = all_reshaped_hidden_states + (reshaped_hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions, all_reshaped_hidden_states] if v is not None)) return HieraEncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states) def unroll(hidden_states: torch.Tensor, image_shape: Tuple[int, int], patch_stride: Tuple[int, int], schedule: List[List[int]]) -> torch.Tensor: (batch_size, _, hidden_size) = hidden_states.shape size = [i // s for (i, s) in zip(image_shape, patch_stride)] current_size = size hidden_states = hidden_states.view(*[batch_size] + current_size + [hidden_size]) for strides in schedule: current_size = [i // s for (i, s) in zip(current_size, strides)] new_shape = [item for pair in zip(current_size, strides) for item in pair] new_shape = [batch_size] + new_shape + [hidden_size] hidden_states = hidden_states.view(new_shape) num_dims = len(new_shape) permute = [0] + list(range(2, num_dims - 1, 2)) + list(range(1, num_dims - 1, 2)) + [num_dims - 1] hidden_states = hidden_states.permute(permute) hidden_states = hidden_states.flatten(0, len(strides)) batch_size *= math.prod(strides) hidden_states = hidden_states.reshape(-1, math.prod(size), hidden_size) return hidden_states class HieraPreTrainedModel(PreTrainedModel): config_class = HieraConfig base_model_prefix = 'hiera' main_input_name = 'pixel_values' supports_gradient_checkpointing = True def _init_weights(self, module) -> None: std = self.config.initializer_range if isinstance(module, HieraEmbeddings): nn.init.trunc_normal_(module.position_embeddings, std=std) elif isinstance(module, HieraDecoder): nn.init.trunc_normal_(module.mask_token, std=std) nn.init.trunc_normal_(module.decoder_position_embeddings, std=std) elif isinstance(module, (nn.Linear, nn.Conv1d, nn.Conv2d)): nn.init.trunc_normal_(module.weight, std=std) if module.bias is not None: nn.init.constant_(module.bias, std) elif isinstance(module, nn.LayerNorm): nn.init.constant_(module.bias, std) 
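# For nn.LayerNorm modules, the bias above is set to `std` (config.initializer_range) rather than zero,
# while the weight below is set to `config.layer_norm_init`.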
nn.init.constant_(module.weight, self.config.layer_norm_init) HIERA_START_DOCSTRING = '\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`HieraConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' HIERA_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`BitImageProcessor.__call__`]\n for details.\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n interpolate_pos_encoding (`bool`, *optional*):\n Whether to interpolate the pre-trained position encodings.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class HieraPooler(nn.Module): def __init__(self, config: HieraConfig): super().__init__() num_features = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1)) self.layernorm = nn.LayerNorm(num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = hidden_states.transpose(1, 2) pooled_output = self.pooler(hidden_states) pooled_output = torch.flatten(pooled_output, 1) pooled_output = self.layernorm(pooled_output) return pooled_output @add_start_docstrings('The bare Hiera Model transformer outputting raw hidden-states without any specific head on top.', HIERA_START_DOCSTRING, '\n add_pooling_layer (`bool`, *optional*, defaults to `True`):\n Whether or not to apply pooling layer.\n is_mae (`bool`, *optional*, defaults to `False`):\n Whether or not to run the model on MAE mode.\n ') class HieraModel(HieraPreTrainedModel): def __init__(self, config: HieraConfig, add_pooling_layer: bool=True, is_mae: bool=False): super().__init__(config) self.num_features = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1)) self.embeddings = HieraEmbeddings(config, is_mae=is_mae) self.encoder = HieraEncoder(config) self.unroll_schedule = [config.query_stride] * len(config.depths[:-1]) self.pooler = HieraPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self) -> HieraPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(HIERA_INPUTS_DOCSTRING) 
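# HieraModel.forward (defined just below): patch-embed the pixel values, add position embeddings
# (optionally interpolated for other resolutions), unroll the tokens into mask-unit order, keep only the
# visible tokens when running in MAE mode, run the hierarchical encoder stages, and pool if a pooler
# was attached.
#
# Illustrative usage sketch (not part of this module; `image` is assumed to be a PIL.Image loaded by the
# caller, and the checkpoint name is taken from `_CHECKPOINT_FOR_DOC` above):
#     from transformers import AutoImageProcessor, HieraModel
#     processor = AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-hf")
#     model = HieraModel.from_pretrained("facebook/hiera-tiny-224-hf")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)  # outputs.last_hidden_state has shape (1, 49, 768), see _EXPECTED_OUTPUT_SHAPE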
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=HieraModelOutput, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE) def forward(self, pixel_values: Optional[torch.Tensor]=None, noise: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') head_mask = self.get_head_mask(head_mask, len(self.config.depths)) (embedding_output, bool_masked_pos, ids_restore) = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, noise=noise) image_shape = (pixel_values.shape[-2], pixel_values.shape[-1]) hidden_states = unroll(embedding_output, image_shape=image_shape, patch_stride=self.config.patch_stride, schedule=self.unroll_schedule) if bool_masked_pos is not None: mask_unit_area = math.prod(self.config.masked_unit_size) (batch_size, _, hidden_size) = hidden_states.shape positions = bool_masked_pos.unsqueeze(-1).tile(1, mask_unit_area, hidden_size) hidden_states = hidden_states[positions] hidden_states = hidden_states.view(batch_size, -1, hidden_size) encoder_outputs = self.encoder(hidden_states, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output) if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) head_outputs = head_outputs + (bool_masked_pos, ids_restore) if bool_masked_pos is not None else head_outputs return head_outputs + encoder_outputs[1:] return HieraModelOutput(last_hidden_state=sequence_output, pooler_output=pooled_output, bool_masked_pos=bool_masked_pos, ids_restore=ids_restore, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states) class HieraDecoder(nn.Module): def __init__(self, config: HieraConfig): super().__init__() num_features = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1)) tokens_spatial_shape = [i // s for (i, s) in zip(config.image_size, config.patch_stride)] self.tokens_spatial_shape_final = [i // s ** config.num_query_pool for (i, s) in zip(tokens_spatial_shape, config.query_stride)] self.mask_unit_spatial_shape_final = [i // s ** config.num_query_pool for (i, s) in zip(config.masked_unit_size, config.query_stride)] self.decoder_embeddings = nn.Linear(num_features, config.decoder_hidden_size) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size)) self.decoder_position_embeddings = nn.Parameter(torch.zeros(1, math.prod(self.tokens_spatial_shape_final), config.decoder_hidden_size)) self.decoder_block = HieraStage(config=config, hidden_size=config.decoder_hidden_size, hidden_size_output=config.decoder_hidden_size, 
num_heads=config.decoder_num_heads, depth=config.decoder_depth, use_mask_unit_attn=False, drop_path=[0.0] * config.decoder_depth, query_stride=[1] * config.decoder_depth, window_size=0) self.decoder_norm = nn.LayerNorm(config.decoder_hidden_size, eps=config.layer_norm_eps) self.pred_stride = config.patch_stride[-1] * config.query_stride[-1] ** config.num_query_pool pred_dim = self.pred_stride ** len(config.query_stride) * config.num_channels self.decoder_pred = nn.Linear(config.decoder_hidden_size, pred_dim) def forward(self, encoder_hidden_states: torch.Tensor, bool_masked_pos: torch.BoolTensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, torch.BoolTensor]: hidden_states = self.decoder_embeddings(encoder_hidden_states) (mask_unit_height, mask_unit_width, decoder_hidden_size) = hidden_states.shape[2:] (batch_size, num_mask_units) = bool_masked_pos.shape decoder_hidden_states = torch.zeros(batch_size, num_mask_units, mask_unit_height, mask_unit_width, decoder_hidden_size, device=hidden_states.device, dtype=hidden_states.dtype) mask_tokens = self.mask_token.view(1, 1, 1, 1, -1) bool_masked_pos = bool_masked_pos.reshape(batch_size, num_mask_units, 1, 1, 1) bool_masked_pos = bool_masked_pos.expand(-1, -1, mask_unit_height, mask_unit_width, decoder_hidden_size) decoder_hidden_states[bool_masked_pos] = hidden_states.flatten() decoder_hidden_states = (1 - bool_masked_pos.float()) * mask_tokens + bool_masked_pos.float() * decoder_hidden_states hidden_states = undo_windowing(decoder_hidden_states, self.tokens_spatial_shape_final, self.mask_unit_spatial_shape_final) bool_masked_pos = undo_windowing(bool_masked_pos[..., 0:1], self.tokens_spatial_shape_final, self.mask_unit_spatial_shape_final) hidden_states = hidden_states.reshape(hidden_states.shape[0], -1, hidden_states.shape[-1]) bool_masked_pos = bool_masked_pos.view(hidden_states.shape[0], -1) hidden_states = hidden_states + self.decoder_position_embeddings (hidden_states, attn_weights) = self.decoder_block(hidden_states, head_mask=head_mask, output_attentions=output_attentions) hidden_states = self.decoder_norm(hidden_states) hidden_states = self.decoder_pred(hidden_states) return (hidden_states, bool_masked_pos) class HieraMultiScaleHead(nn.Module): def __init__(self, config: HieraConfig): super().__init__() self.mask_unit_spatial_shape_final = [i // s ** config.num_query_pool for (i, s) in zip(config.masked_unit_size, config.query_stride)] self.stage_dimensions = [int(config.embed_dim * config.embed_dim_multiplier ** i) for i in range(len(config.depths))] current_masked_unit_size = config.masked_unit_size self.multi_scale_fusion_heads = nn.ModuleList() for idx in range(config.num_query_pool): kernel = [i // s for (i, s) in zip(current_masked_unit_size, self.mask_unit_spatial_shape_final)] current_masked_unit_size = [i // s for (i, s) in zip(current_masked_unit_size, config.query_stride)] self.multi_scale_fusion_heads.append(nn.Conv2d(self.stage_dimensions[idx], self.stage_dimensions[-1], kernel_size=kernel, stride=kernel)) self.multi_scale_fusion_heads.append(nn.Identity()) def apply_fusion_head(self, head: nn.Module, hidden_states: torch.Tensor) -> torch.Tensor: if isinstance(head, nn.Identity): return hidden_states (batch_size, num_mask_units, mask_unit_height, mask_unit_width, hidden_size) = hidden_states.shape hidden_states = hidden_states.reshape(batch_size * num_mask_units, mask_unit_height, mask_unit_width, hidden_size) hidden_states = hidden_states.permute(0, 3, 1, 2) hidden_states = 
head(hidden_states) hidden_states = hidden_states.permute(0, 2, 3, 1) (mask_unit_height_final, mask_unit_width_final, hidden_size) = hidden_states.shape[1:] hidden_states = hidden_states.reshape(batch_size, num_mask_units, mask_unit_height_final, mask_unit_width_final, hidden_size) return hidden_states def forward(self, feature_maps: List[torch.Tensor]) -> torch.Tensor: hidden_states = 0.0 for (head, feature_map) in zip(self.multi_scale_fusion_heads, feature_maps): hidden_states = hidden_states + self.apply_fusion_head(head, feature_map) return hidden_states @add_start_docstrings('The Hiera Model transformer with the decoder on top for self-supervised pre-training.\n\n \n\n Note that we provide a script to pre-train this model on custom data in our [examples\n directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).\n\n \n ', HIERA_START_DOCSTRING) class HieraForPreTraining(HieraPreTrainedModel): def __init__(self, config: HieraConfig) -> None: super().__init__(config) self.hiera = HieraModel(config, add_pooling_layer=False, is_mae=True) self.encoder_norm = nn.LayerNorm(self.hiera.num_features, eps=config.layer_norm_eps) self.multiscale_fusion = HieraMultiScaleHead(config) self.decoder = HieraDecoder(config) self.pred_stride = self.decoder.pred_stride self.post_init() def get_pixel_label_2d(self, pixel_values: torch.Tensor, bool_masked_pos: torch.BoolTensor) -> torch.Tensor: pixel_values = pixel_values.permute(0, 2, 3, 1) size = self.pred_stride label = pixel_values.unfold(1, size, size).unfold(2, size, size) label = label.flatten(1, 2).flatten(2) label = label[bool_masked_pos] if self.config.normalize_pixel_loss: mean = label.mean(dim=-1, keepdim=True) var = label.var(dim=-1, keepdim=True) label = (label - mean) / (var + 1e-06) ** 0.5 return label def forward_loss(self, pixel_values: torch.Tensor, logits: torch.Tensor, bool_masked_pos: torch.BoolTensor): bool_masked_pos = ~bool_masked_pos label = self.get_pixel_label_2d(pixel_values, bool_masked_pos) logits = logits[bool_masked_pos] loss = (logits - label) ** 2 loss = loss.mean() return loss @add_start_docstrings_to_model_forward(HIERA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=HieraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward(self, pixel_values: Optional[torch.Tensor]=None, noise: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, HieraForPreTrainingOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states outputs = self.hiera(pixel_values, noise=noise, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict) feature_maps = outputs[-1] bool_masked_pos = outputs[1] ids_to_restore = outputs[2] feature_maps = feature_maps[1:self.hiera.config.num_query_pool + 1] + (feature_maps[-1],) fused_hidden_states = self.multiscale_fusion(feature_maps) fused_hidden_states = self.encoder_norm(fused_hidden_states) (logits, bool_masked_pos) = self.decoder(fused_hidden_states, bool_masked_pos=bool_masked_pos, 
head_mask=head_mask, output_attentions=output_attentions) loss = self.forward_loss(pixel_values, logits, bool_masked_pos) if not return_dict: output = (logits, bool_masked_pos, ids_to_restore) if output_hidden_states: output = output + (outputs[3],) if output_attentions: output = output + (outputs[4],) if output_hidden_states: output = output + (outputs[-1],) return (loss,) + output if loss is not None else output return HieraForPreTrainingOutput(loss=loss, logits=logits, bool_masked_pos=bool_masked_pos, ids_restore=ids_to_restore, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states if output_hidden_states else None) @add_start_docstrings("\n Hiera Model transformer with an image classification head on top (a linear layer on top of the final hidden state with\n average pooling) e.g. for ImageNet.\n\n \n\n Note that it's possible to fine-tune Hiera on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n \n ", HIERA_START_DOCSTRING) class HieraForImageClassification(HieraPreTrainedModel): def __init__(self, config: HieraConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.hiera = HieraModel(config, add_pooling_layer=True, is_mae=False) self.classifier = nn.Linear(self.hiera.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() self.post_init() @add_start_docstrings_to_model_forward(HIERA_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=HieraForImageClassificationOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT) def forward(self, pixel_values, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, HieraForImageClassificationOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states outputs = self.hiera(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct 
= BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return HieraForImageClassificationOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states) @add_start_docstrings('\n Hiera backbone, to be used with frameworks like DETR and MaskFormer.\n ', HIERA_START_DOCSTRING) class HieraBackbone(HieraPreTrainedModel, BackboneMixin): def __init__(self, config: HieraConfig): super().__init__(config) super()._init_backbone(config) self.num_features = [config.embed_dim] + [int(config.embed_dim * config.embed_dim_multiplier ** i) for i in range(len(config.depths))] self.embeddings = HieraEmbeddings(config, is_mae=False) self.encoder = HieraEncoder(config) hidden_states_norms = {} for (stage, num_channels) in zip(self._out_features, self.channels): hidden_states_norms[stage] = nn.LayerNorm(num_channels) self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions (embedding_output, _, _) = self.embeddings(pixel_values) outputs = self.encoder(embedding_output, head_mask=None, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict) hidden_states = outputs[-1] feature_maps = () for (stage, hidden_state) in zip(self.stage_names, hidden_states): if stage in self.out_features: (batch_size, height, width, num_channels) = hidden_state.shape hidden_state = hidden_state.view(batch_size, height * width, num_channels) hidden_state = self.hidden_states_norms[stage](hidden_state) hidden_state = hidden_state.view(batch_size, height, width, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_maps += (hidden_state,) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs[1],) if output_attentions: output += (outputs[2],) return output return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs[1] if output_hidden_states else None, attentions=outputs[2] if output_attentions else None) # File: transformers-main/src/transformers/models/hubert/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _import_structure = {'configuration_hubert': ['HubertConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_hubert'] = ['HubertForCTC', 'HubertForSequenceClassification', 'HubertModel', 'HubertPreTrainedModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_hubert'] = ['TFHubertForCTC', 'TFHubertModel', 'TFHubertPreTrainedModel'] if TYPE_CHECKING: from .configuration_hubert import HubertConfig try: if not is_torch_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_hubert import HubertForCTC, HubertForSequenceClassification, HubertModel, HubertPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_hubert import TFHubertForCTC, TFHubertModel, TFHubertPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/hubert/configuration_hubert.py """""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class HubertConfig(PretrainedConfig): model_type = 'hubert' def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_layer_norm=True, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-05, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction='sum', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_layer_norm = feat_proj_layer_norm self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.use_weighted_layer_sum = use_weighted_layer_sum self.classifier_proj_size = classifier_proj_size if len(self.conv_stride) != self.num_feat_extract_layers or len(self.conv_kernel) != self.num_feat_extract_layers or len(self.conv_dim) != self.num_feat_extract_layers: raise ValueError(f'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.') self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) # File: transformers-main/src/transformers/models/hubert/convert_distilhubert_original_s3prl_checkpoint_to_pytorch.py """""" import argparse import torch from s3prl.hub import distilhubert from transformers import HubertConfig, HubertModel, Wav2Vec2FeatureExtractor, logging logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = {'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'mask_emb': 'masked_spec_embed'} def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split('.'): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, f"Shape of hf {(key + '.' + weight_type if weight_type is not None else '')} is {hf_shape}, but should be {value.shape} for {full_name}" if weight_type == 'weight': hf_pointer.weight.data = value elif weight_type == 'weight_g': hf_pointer.weight_g.data = value elif weight_type == 'weight_v': hf_pointer.weight_v.data = value elif weight_type == 'bias': hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{(key + '.' 
+ weight_type if weight_type is not None else '')} was initialized from {full_name}.") def recursively_load_weights(fairseq_model, hf_model): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.feature_extractor for (name, value) in fairseq_dict.items(): is_used = False if 'conv_layers' in name: load_conv_layer(name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group') is_used = True else: for (key, mapped_key) in MAPPING.items(): mapped_key = mapped_key if key in name: is_used = True if '*' in mapped_key: layer_index = name.split(key)[0].split('.')[-2] mapped_key = mapped_key.replace('*', layer_index) if 'weight_g' in name: weight_type = 'weight_g' elif 'weight_v' in name: weight_type = 'weight_v' elif 'weight' in name: weight_type = 'weight' elif 'bias' in name: weight_type = 'bias' else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f'Unused weights: {unused_weights}') def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split('conv_layers.')[-1] items = name.split('.') layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if 'bias' in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif 'weight' in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif type_id == 2 and (not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if 'bias' in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') elif 'weight' in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' 
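# The shape assert above has passed, so the s3prl/distilhubert layer-norm weight tensor is copied
# in place into the matching conv layer of the HF feature extractor below.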
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') else: unused_weights.append(full_name) def convert_config(model): config = HubertConfig() fs_config = model.config config.activation_dropout = fs_config.activation_dropout config.apply_spec_augment = False config.attention_dropout = fs_config.attention_dropout config.conv_bias = False conv_layers = eval(fs_config.extractor_conv_feature_layers) config.conv_dim = [x[0] for x in conv_layers] config.conv_kernel = [x[1] for x in conv_layers] config.conv_stride = [x[2] for x in conv_layers] config.feat_extract_activation = 'gelu' config.feat_extract_norm = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group' config.feat_proj_layer_norm = False config.feat_proj_dropout = 0.0 config.final_dropout = 0.0 config.hidden_act = fs_config.activation_fn config.hidden_dropout = fs_config.dropout config.hidden_size = fs_config.encoder_embed_dim config.initializer_range = 0.02 config.intermediate_size = fs_config.encoder_ffn_embed_dim config.layer_norm_eps = 1e-05 config.layerdrop = 0.0 config.num_attention_heads = fs_config.encoder_attention_heads config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups config.num_conv_pos_embeddings = fs_config.conv_pos config.num_feat_extract_layers = len(conv_layers) config.num_hidden_layers = fs_config.encoder_layers return config @torch.no_grad() def convert_hubert_checkpoint(pytorch_dump_folder_path, config_path=None): model = distilhubert().model.model if config_path is not None: config = HubertConfig.from_pretrained(config_path) else: config = convert_config(model) model = model.eval() feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=False, return_attention_mask=False) hf_model = HubertModel(config) recursively_load_weights(model, hf_model) feature_extractor.save_pretrained(pytorch_dump_folder_path) hf_model.save_pretrained(pytorch_dump_folder_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') args = parser.parse_args() convert_hubert_checkpoint(args.pytorch_dump_folder_path, args.config_path) # File: transformers-main/src/transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py """""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import HubertConfig, HubertForCTC, HubertModel, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, logging logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = {'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 
'feature_projection.layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed'} def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split('.'): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, f"Shape of hf {(key + '.' + weight_type if weight_type is not None else '')} is {hf_shape}, but should be {value.shape} for {full_name}" if weight_type == 'weight': hf_pointer.weight.data = value elif weight_type == 'weight_g': hf_pointer.weight_g.data = value elif weight_type == 'weight_v': hf_pointer.weight_v.data = value elif weight_type == 'bias': hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{(key + '.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.") def recursively_load_weights(fairseq_model, hf_model, is_finetuned): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for (name, value) in fairseq_dict.items(): is_used = False if 'conv_layers' in name: load_conv_layer(name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group') is_used = True else: for (key, mapped_key) in MAPPING.items(): mapped_key = 'hubert.' + mapped_key if is_finetuned and mapped_key != 'lm_head' else mapped_key if key in name or (key.split('w2v_model.')[-1] == name.split('.')[0] and (not is_finetuned)): is_used = True if '*' in mapped_key: layer_index = name.split(key)[0].split('.')[-2] mapped_key = mapped_key.replace('*', layer_index) if 'weight_g' in name: weight_type = 'weight_g' elif 'weight_v' in name: weight_type = 'weight_v' elif 'weight' in name: weight_type = 'weight' elif 'bias' in name: weight_type = 'bias' else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f'Unused weights: {unused_weights}') def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split('conv_layers.')[-1] items = name.split('.') layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if 'bias' in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif 'weight' in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.') elif type_id == 2 and (not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if 'bias' in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' 
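# In this converter, `type_id == 0` entries carry the conv weight/bias and `type_id == 2` entries carry
# the norm parameters (a group norm exists only for layer 0 when `use_group_norm` is True); after the
# shape assert above, the norm bias is copied into the HF feature extractor below.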
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') elif 'weight' in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.') else: unused_weights.append(full_name) @torch.no_grad() def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True): if config_path is not None: config = HubertConfig.from_pretrained(config_path) else: config = HubertConfig() if is_finetuned: if dict_path: target_dict = Dictionary.load(dict_path) config.bos_token_id = target_dict.pad_index config.pad_token_id = target_dict.bos_index config.eos_token_id = target_dict.eos_index config.vocab_size = len(target_dict.symbols) vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json') if not os.path.isdir(pytorch_dump_folder_path): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path)) return os.makedirs(pytorch_dump_folder_path, exist_ok=True) with open(vocab_path, 'w', encoding='utf-8') as vocab_handle: json.dump(target_dict.indices, vocab_handle) tokenizer = Wav2Vec2CTCTokenizer(vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False) return_attention_mask = True if config.feat_extract_norm == 'layer' else False feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask) processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) processor.save_pretrained(pytorch_dump_folder_path) hf_wav2vec = HubertForCTC(config) else: hf_wav2vec = HubertModel(config) if is_finetuned: (model, _, _) = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}) else: (model, _, _) = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) model = model[0].eval() recursively_load_weights(model, hf_wav2vec, is_finetuned) hf_wav2vec.save_pretrained(pytorch_dump_folder_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not') args = parser.parse_args() convert_hubert_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned) # File: transformers-main/src/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py """""" import argparse import torch from transformers import HubertConfig, 
HubertForSequenceClassification, Wav2Vec2FeatureExtractor, logging logging.set_verbosity_info() logger = logging.get_logger(__name__) SUPPORTED_MODELS = ['UtteranceLevel'] @torch.no_grad() def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path): checkpoint = torch.load(checkpoint_path, map_location='cpu') if checkpoint['Config']['downstream_expert']['modelrc']['select'] not in SUPPORTED_MODELS: raise NotImplementedError(f'The supported s3prl models are {SUPPORTED_MODELS}') downstream_dict = checkpoint['Downstream'] hf_congfig = HubertConfig.from_pretrained(config_path) hf_model = HubertForSequenceClassification.from_pretrained(base_model_name, config=hf_congfig) hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(base_model_name, return_attention_mask=True, do_normalize=False) if hf_congfig.use_weighted_layer_sum: hf_model.layer_weights.data = checkpoint['Featurizer']['weights'] hf_model.projector.weight.data = downstream_dict['projector.weight'] hf_model.projector.bias.data = downstream_dict['projector.bias'] hf_model.classifier.weight.data = downstream_dict['model.post_net.linear.weight'] hf_model.classifier.bias.data = downstream_dict['model.post_net.linear.bias'] hf_feature_extractor.save_pretrained(model_dump_path) hf_model.save_pretrained(model_dump_path) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.') parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') args = parser.parse_args() convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path) # File: transformers-main/src/transformers/models/hubert/modeling_hubert.py """""" import warnings from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging, replace_return_docstrings from .configuration_hubert import HubertConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 1 _CONFIG_FOR_DOC = 'HubertConfig' _CHECKPOINT_FOR_DOC = 'facebook/hubert-large-ls960-ft' _EXPECTED_OUTPUT_SHAPE = [1, 292, 768] _CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'" _CTC_EXPECTED_LOSS = 22.68 _SEQ_CLASS_CHECKPOINT = 'superb/hubert-base-superb-ks' _SEQ_CLASS_EXPECTED_OUTPUT = "'_unknown_'" _SEQ_CLASS_EXPECTED_LOSS = 8.53 def _compute_mask_indices(shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor]=None, min_masks: int=0) -> np.ndarray: (batch_size, sequence_length) = shape if mask_length < 1: raise 
ValueError('`mask_length` has to be bigger than 0.') if mask_length > sequence_length: raise ValueError(f'`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`') epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span input_lengths = attention_mask.sum(-1).detach().tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: num_masked_span = compute_num_masked_span(input_length) spec_aug_mask_idx = np.random.choice(np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False) if len(spec_aug_mask_idx) == 0: dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate([spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) spec_aug_mask_idxs = np.broadcast_to(spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(batch_size, max_num_masked_span * mask_length) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask class HubertNoLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class HubertLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) 
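# --- Illustrative aside (not part of the original module) --------------------
# A minimal sketch of how the `_compute_mask_indices` helper defined above can be
# exercised on its own. The shapes, probabilities, and the import path are
# assumptions made for this example only; it simply shows that the helper returns
# a boolean (batch_size, sequence_length) array marking SpecAugment-style spans.
import numpy as np
import torch
from transformers.models.hubert.modeling_hubert import _compute_mask_indices

batch_size, sequence_length = 2, 50
mask = _compute_mask_indices(
    shape=(batch_size, sequence_length),
    mask_prob=0.05,      # probability used to derive the number of masked spans
    mask_length=10,      # every span covers 10 consecutive frames
    attention_mask=torch.ones(batch_size, sequence_length, dtype=torch.long),
    min_masks=2,         # lower bound on the number of spans per example
)
assert mask.shape == (batch_size, sequence_length) and mask.dtype == np.bool_
print("masked frames per example:", mask.sum(axis=-1))
# ------------------------------------------------------------------------------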
hidden_states = self.activation(hidden_states) return hidden_states class HubertGroupNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias) self.activation = ACT2FN[config.feat_extract_activation] self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class HubertPositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, 'weight_norm'): weight_norm = nn.utils.parametrizations.weight_norm if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = weight_norm(self.conv, name='weight', dim=2) if hasattr(self.conv, 'parametrizations'): weight_g = self.conv.parametrizations.weight.original0 weight_v = self.conv.parametrizations.weight.original1 else: weight_g = self.conv.weight_g weight_v = self.conv.weight_v deepspeed.zero.register_external_parameter(self, weight_v) deepspeed.zero.register_external_parameter(self, weight_g) else: self.conv = weight_norm(self.conv, name='weight', dim=2) self.padding = HubertSamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states class HubertSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, :-self.num_pad_remove] return hidden_states class HubertFeatureEncoder(nn.Module): def __init__(self, config): super().__init__() if config.feat_extract_norm == 'group': conv_layers = [HubertGroupNormConvLayer(config, layer_id=0)] + [HubertNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)] elif config.feat_extract_norm == 'layer': conv_layers = [HubertLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)] else: raise ValueError(f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']") self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: if self._requires_grad and 
self.gradient_checkpointing and self.training: hidden_states = self._gradient_checkpointing_func(conv_layer.__call__, hidden_states) else: hidden_states = conv_layer(hidden_states) return hidden_states class HubertFeatureExtractor(HubertFeatureEncoder): def __init__(self, config): super().__init__(config) warnings.warn(f'The class `{self.__class__.__name__}` has been depreciated and will be removed in Transformers v5. Use `{self.__class__.__bases__[0].__name__}` instead.', FutureWarning) class HubertFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.feat_proj_layer_norm = config.feat_proj_layer_norm if self.feat_proj_layer_norm: self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps) self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): if self.feat_proj_layer_norm: hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class HubertAttention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[HubertConfig]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, 
bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) class HubertFlashAttention2(HubertAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim) def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions: raise ValueError('HubertFlashAttention2 attention does not support output_attentions') is_cross_attention = key_value_states is not None (bsz, q_len, _) = hidden_states.size() query_states = self._reshape(self.q_proj(hidden_states), -1, bsz) if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0].transpose(1, 2) value_states = past_key_value[1].transpose(1, 2) elif is_cross_attention: key_states = self._reshape(self.k_proj(key_value_states), -1, bsz) value_states = self._reshape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) key_states = 
torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1) value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1) else: key_states = self._reshape(self.k_proj(hidden_states), -1, bsz) value_states = self._reshape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2)) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class HubertSdpaAttention(HubertAttention): def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if output_attentions or layer_head_mask is not None: logger.warning_once('HubertModel is using HubertSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. 
This warning can be removed using the argument `attn_implementation="eager"` when loading the model.') return super().forward(hidden_states, key_value_states=key_value_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) query_states = self._shape(query_states, tgt_len, bsz) is_causal = True if self.is_causal and attention_mask is None and (tgt_len > 1) else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.dropout if self.training else 0.0, is_causal=is_causal) if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, None, past_key_value) HUBERT_ATTENTION_CLASSES = {'eager': HubertAttention, 'sdpa': HubertSdpaAttention, 'flash_attention_2': HubertFlashAttention2} class HubertFeedForward(nn.Module): def __init__(self, config): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states class HubertEncoderLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = HUBERT_ATTENTION_CLASSES[config._attn_implementation](embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = HubertFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, 
output_attentions=False): attn_residual = hidden_states (hidden_states, attn_weights, _) = self.attention(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class HubertAttnAdapterLayer(nn.Module): def __init__(self, config): super().__init__() self.input_dim = config.adapter_attn_dim self.hidden_dim = config.hidden_size self.norm = nn.LayerNorm(self.hidden_dim) self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim) self.act_fn = nn.ReLU() self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim) def forward(self, hidden_states: torch.FloatTensor): hidden_states = self.norm(hidden_states) hidden_states = self.linear_1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states class HubertEncoderLayerStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.attention = HUBERT_ATTENTION_CLASSES[config._attn_implementation](embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = HubertFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if getattr(config, 'adapter_attn_dim', None) is not None: self.adapter_layer = HubertAttnAdapterLayer(config) else: self.adapter_layer = None def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) (hidden_states, attn_weights, _) = self.attention(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) if self.adapter_layer is not None: hidden_states = hidden_states + self.adapter_layer(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class HubertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = HubertPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([HubertEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' def forward(self, hidden_states: torch.tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 if self._use_flash_attention_2: attention_mask = attention_mask if 
attention_mask is not None and 0 in attention_mask else None else: attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and dropout_probability < self.config.layerdrop else False if not skip_the_layer or deepspeed_zero3_is_enabled: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(layer.__call__, hidden_states, attention_mask, output_attentions) else: layer_outputs = layer(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class HubertEncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = HubertPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList([HubertEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2]) hidden_states[~expand_attention_mask] = 0 if self._use_flash_attention_2: attention_mask = attention_mask if attention_mask is not None and 0 in attention_mask else None else: attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() for layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = torch.rand([]) skip_the_layer = True if self.training and dropout_probability < self.config.layerdrop else False if not skip_the_layer or deepspeed_zero3_is_enabled: if self.gradient_checkpointing and 
self.training: layer_outputs = self._gradient_checkpointing_func(layer.__call__, hidden_states, attention_mask, output_attentions) else: layer_outputs = layer(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) class HubertPreTrainedModel(PreTrainedModel): config_class = HubertConfig base_model_prefix = 'hubert' main_input_name = 'input_values' supports_gradient_checkpointing = True _supports_flash_attn_2 = True _supports_sdpa = True def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): if is_deepspeed_zero3_enabled(): import deepspeed if hasattr(module, 'weight_v') and hasattr(module, 'weight_g'): with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0): nn.init.kaiming_normal_(module.weight.data) else: with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0): nn.init.kaiming_normal_(module.weight.data) else: nn.init.kaiming_normal_(module.weight.data) if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None: module.bias.data.zero_() def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): def _conv_out_length(input_length, kernel_size, stride): return torch.div(input_length - kernel_size, stride, rounding_mode='floor') + 1 for (kernel_size, stride) in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device) attention_mask[torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask HUBERT_START_DOCSTRING = '\n Hubert was proposed in [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden\n Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia,\n Ruslan Salakhutdinov, Abdelrahman Mohamed.\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving etc.).\n\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. 
Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`HubertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' HUBERT_INPUTS_DOCSTRING = '\n Args:\n input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file\n into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install\n soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and\n conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.\n attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,\n 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n \n\n `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==\n True`. For all models whose processor has `config.return_attention_mask == False`, such as\n [hubert-base](https://huggingface.co/facebook/hubert-base-ls960), `attention_mask` should **not** be passed\n to avoid degraded performance when doing batched inference. For such models `input_values` should simply be\n padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different\n results depending on whether `input_values` is padded or not.\n\n \n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings('The bare Hubert Model transformer outputting raw hidden-states without any specific head on top.', HUBERT_START_DOCSTRING) class HubertModel(HubertPreTrainedModel): def __init__(self, config: HubertConfig): super().__init__(config) self.config = config self.feature_extractor = HubertFeatureEncoder(config) self.feature_projection = HubertFeatureProjection(config) if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_()) if config.do_stable_layer_norm: self.encoder = HubertEncoderStableLayerNorm(config) else: self.encoder = HubertEncoder(config) self.post_init() def _mask_hidden_states(self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None): if not getattr(self.config, 'apply_spec_augment', True): return hidden_states (batch_size, sequence_length, hidden_size) = hidden_states.size() if mask_time_indices is not None: hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: mask_feature_indices = _compute_mask_indices((batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, mask_time_indices: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask) hidden_states = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices) encoder_outputs = self.encoder(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states,) + encoder_outputs[1:] return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) @add_start_docstrings('Hubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).', HUBERT_START_DOCSTRING) class HubertForCTC(HubertPreTrainedModel): def __init__(self, config, target_lang: Optional[str]=None): super().__init__(config) self.hubert = HubertModel(config) self.dropout = nn.Dropout(config.final_dropout) self.target_lang = target_lang if config.vocab_size is None: raise ValueError(f"You are trying to instantiate {self.__class__} with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `HubertForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.") output_hidden_size = config.output_hidden_size if hasattr(config, 'add_adapter') and config.add_adapter else config.hidden_size self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) self.post_init() def tie_weights(self): target_lang = self.target_lang if target_lang is not None and getattr(self.config, 'adapter_attn_dim', None) is None: raise ValueError(f'Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.') elif target_lang is None and getattr(self.config, 'adapter_attn_dim', None) is not None: logger.info("By default `target_lang` is set to 'eng'.") elif target_lang is not None: self.load_adapter(target_lang, force_load=True) def freeze_feature_extractor(self): warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. 
Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning) self.freeze_feature_encoder() def freeze_feature_encoder(self): self.hubert.feature_extractor._freeze_parameters() def freeze_base_model(self): for param in self.hubert.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS) def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[Tuple, CausalLMOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None and labels.max() >= self.config.vocab_size: raise ValueError(f'Label values must be <= vocab_size: {self.config.vocab_size}') outputs = self.hubert(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: attention_mask = attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss(log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return (loss,) + output if loss is not None else output return CausalLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n ', HUBERT_START_DOCSTRING) class HubertForSequenceClassification(HubertPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, 'add_adapter') and config.add_adapter: raise ValueError('Sequence classification does not support the use of Hubert adapters (config.add_adapter=True)') self.hubert = HubertModel(config) num_layers = config.num_hidden_layers + 1 if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) self.post_init() def freeze_feature_extractor(self): warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. 
Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning) self.freeze_feature_encoder() def freeze_feature_encoder(self): self.hubert.feature_extractor._freeze_parameters() def freeze_base_model(self): for param in self.hubert.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings(checkpoint=_SEQ_CLASS_CHECKPOINT, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality='audio', expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS) def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[Tuple, SequenceClassifierOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.hubert(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) hidden_states[~padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) # File: transformers-main/src/transformers/models/hubert/modeling_tf_hubert.py """""" from __future__ import annotations import warnings from typing import Any, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput from ...modeling_tf_utils import TFPreTrainedModel, get_initializer, keras, keras_serializable, unpack_inputs from ...tf_utils import shape_list, stable_softmax from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_hubert import HubertConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'HubertConfig' LARGE_NEGATIVE = -100000000.0 def _sample_without_replacement(distribution, num_samples): z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1)) (_, indices) = tf.nn.top_k(distribution + z, num_samples) return indices def _scatter_values_on_batch_indices(values, batch_indices, output_shape): indices_shape = shape_list(batch_indices) broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1]) 
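# --- Illustrative aside (not part of the original module) --------------------
# A minimal sketch of running `HubertForSequenceClassification` end to end,
# using the `superb/hubert-base-superb-ks` checkpoint referenced by the
# docstring constants in modeling_hubert.py. The one-second silent waveform is
# a placeholder; substitute real 16 kHz speech for a meaningful prediction.
import torch
from transformers import AutoFeatureExtractor, HubertForSequenceClassification

feature_extractor = AutoFeatureExtractor.from_pretrained("superb/hubert-base-superb-ks")
model = HubertForSequenceClassification.from_pretrained("superb/hubert-base-superb-ks")
model.eval()

inputs = feature_extractor([0.0] * 16000, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_id = int(logits.argmax(dim=-1))
print(model.config.id2label[predicted_id])
# ------------------------------------------------------------------------------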
pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)) return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape) def _compute_mask_indices(shape: Tuple[int, int], mask_prob: float, mask_length: int, min_masks: int=0) -> tf.Tensor: (batch_size, sequence_length) = shape if mask_length < 1: raise ValueError('`mask_length` has to be bigger than 0.') tf.debugging.assert_less(mask_length, sequence_length, message=f'`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`') num_masked_spans = mask_prob * tf.cast(sequence_length, tf.float32) / mask_length + tf.random.uniform((1,)) num_masked_spans = tf.maximum(num_masked_spans, min_masks) num_masked_spans = tf.cast(num_masked_spans, tf.int32) num_masked_spans = tf.math.minimum(sequence_length // mask_length, num_masked_spans) num_masked_spans = tf.squeeze(num_masked_spans) spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32) uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1))) spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans) spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1) spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length)) spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length)) offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :] offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1)) offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length)) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets spec_aug_mask = _scatter_values_on_batch_indices(tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, tf.shape(spec_aug_mask)) return spec_aug_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int]=None): src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE class TFHubertGroupNorm(keras.layers.Layer): def __init__(self, groups: int=32, axis: int=-1, epsilon: float=0.001, center: bool=True, scale: bool=True, beta_initializer: keras.initializers.Initializer='zeros', gamma_initializer: keras.initializers.Initializer='ones', beta_regularizer: keras.regularizers.Regularizer=None, gamma_regularizer: keras.regularizers.Regularizer=None, beta_constraint: keras.constraints.Constraint=None, gamma_constraint: keras.constraints.Constraint=None, **kwargs): super().__init__(**kwargs) self.supports_masking = True self.groups = groups self.axis = axis self.epsilon = epsilon self.center = center self.scale = scale self.beta_initializer = keras.initializers.get(beta_initializer) self.gamma_initializer = keras.initializers.get(gamma_initializer) self.beta_regularizer = keras.regularizers.get(beta_regularizer) self.gamma_regularizer = keras.regularizers.get(gamma_regularizer) self.beta_constraint = keras.constraints.get(beta_constraint) self.gamma_constraint = keras.constraints.get(gamma_constraint) self._check_axis() def build(self, input_shape): self._check_if_input_shape_is_none(input_shape) self._set_number_of_groups_for_instance_norm(input_shape) self._check_size_of_dimensions(input_shape) self._create_input_spec(input_shape) self._add_gamma_weight(input_shape) self._add_beta_weight(input_shape) self.built = True super().build(input_shape) 
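# --- Illustrative aside (not part of the original module) --------------------
# A minimal sketch of the `_expand_mask` helper defined above: it turns a
# (batch, src_len) padding mask of 1s and 0s into an additive
# (batch, 1, tgt_len, src_len) bias in which padded source positions receive
# LARGE_NEGATIVE. The tiny mask below is made up for demonstration only.
import tensorflow as tf
from transformers.models.hubert.modeling_tf_hubert import _expand_mask

padding_mask = tf.constant([[1.0, 1.0, 1.0, 0.0]])  # one sequence, last step is padding
bias = _expand_mask(padding_mask)                    # tgt_len defaults to src_len
print(bias.shape)                                    # (1, 1, 4, 4)
print(bias[0, 0, 0].numpy())                         # [0., 0., 0., -1e8] -> padded column is suppressed
# ------------------------------------------------------------------------------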
def call(self, inputs): input_shape = keras.backend.int_shape(inputs) tensor_input_shape = tf.shape(inputs) (reshaped_inputs, group_shape) = self._reshape_into_groups(inputs, input_shape, tensor_input_shape) normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape) is_instance_norm = input_shape[self.axis] // self.groups == 1 if not is_instance_norm: outputs = tf.reshape(normalized_inputs, tensor_input_shape) else: outputs = normalized_inputs return outputs def get_config(self): config = {'groups': self.groups, 'axis': self.axis, 'epsilon': self.epsilon, 'center': self.center, 'scale': self.scale, 'beta_initializer': keras.initializers.serialize(self.beta_initializer), 'gamma_initializer': keras.initializers.serialize(self.gamma_initializer), 'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer), 'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer), 'beta_constraint': keras.constraints.serialize(self.beta_constraint), 'gamma_constraint': keras.constraints.serialize(self.gamma_constraint)} base_config = super().get_config() return {**base_config, **config} def compute_output_shape(self, input_shape): return input_shape def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape): group_shape = [tensor_input_shape[i] for i in range(len(input_shape))] is_instance_norm = input_shape[self.axis] // self.groups == 1 if not is_instance_norm: group_shape[self.axis] = input_shape[self.axis] // self.groups group_shape.insert(self.axis, self.groups) group_shape = tf.stack(group_shape) reshaped_inputs = tf.reshape(inputs, group_shape) return (reshaped_inputs, group_shape) else: return (inputs, group_shape) def _apply_normalization(self, reshaped_inputs, input_shape): group_shape = keras.backend.int_shape(reshaped_inputs) group_reduction_axes = list(range(1, len(group_shape))) is_instance_norm = input_shape[self.axis] // self.groups == 1 if not is_instance_norm: axis = -2 if self.axis == -1 else self.axis - 1 else: axis = -1 if self.axis == -1 else self.axis - 1 group_reduction_axes.pop(axis) (mean, variance) = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True) (gamma, beta) = self._get_reshaped_weights(input_shape) normalized_inputs = tf.nn.batch_normalization(reshaped_inputs, mean=mean, variance=variance, scale=gamma, offset=beta, variance_epsilon=self.epsilon) return normalized_inputs def _get_reshaped_weights(self, input_shape): broadcast_shape = self._create_broadcast_shape(input_shape) gamma = None beta = None if self.scale: gamma = tf.reshape(self.gamma, broadcast_shape) if self.center: beta = tf.reshape(self.beta, broadcast_shape) return (gamma, beta) def _check_if_input_shape_is_none(self, input_shape): dim = input_shape[self.axis] if dim is None: raise ValueError('Axis ' + str(self.axis) + ' of input tensor should have a defined dimension but the layer received an input with shape ' + str(input_shape) + '.') def _set_number_of_groups_for_instance_norm(self, input_shape): dim = input_shape[self.axis] if self.groups == -1: self.groups = dim def _check_size_of_dimensions(self, input_shape): dim = input_shape[self.axis] if dim < self.groups: raise ValueError('Number of groups (' + str(self.groups) + ') cannot be more than the number of channels (' + str(dim) + ').') if dim % self.groups != 0: raise ValueError('Number of groups (' + str(self.groups) + ') must be a multiple of the number of channels (' + str(dim) + ').') def _check_axis(self): if self.axis == 0: raise ValueError('You are trying to normalize your 
batch axis. Do you want to use tf.layer.batch_normalization instead') def _create_input_spec(self, input_shape): dim = input_shape[self.axis] self.input_spec = keras.layers.InputSpec(ndim=len(input_shape), axes={self.axis: dim}) def _add_gamma_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.scale: self.gamma = self.add_weight(shape=shape, name='gamma', initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint) else: self.gamma = None def _add_beta_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.center: self.beta = self.add_weight(shape=shape, name='beta', initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint) else: self.beta = None def _create_broadcast_shape(self, input_shape): broadcast_shape = [1] * len(input_shape) is_instance_norm = input_shape[self.axis] // self.groups == 1 if not is_instance_norm: broadcast_shape[self.axis] = input_shape[self.axis] // self.groups broadcast_shape.insert(self.axis, self.groups) else: broadcast_shape[self.axis] = self.groups return broadcast_shape class TFHubertWeightNormConv1D(keras.layers.Conv1D): def __init__(self, filters, kernel_size, groups, explicit_padding, **kwargs): super().__init__(filters=filters, kernel_size=kernel_size, groups=groups, padding='valid', use_bias=True, bias_initializer='he_normal', **kwargs) self.explicit_padding = explicit_padding self.filter_axis = 2 self.kernel_norm_axes = tf.constant([0, 1]) def _init_norm(self): kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.weight_v), axis=self.kernel_norm_axes)) self.weight_g.assign(kernel_norm[:, tf.newaxis, tf.newaxis]) def _normalize_kernel(self): kernel = tf.nn.l2_normalize(self.weight_v, axis=self.kernel_norm_axes) * tf.transpose(self.weight_g) self.kernel = tf.transpose(kernel) def build(self, input_shape): if not self.built: super().build(input_shape) self.kernel = tf.Variable(tf.transpose(self.kernel), name='weight_v', trainable=True) self.weight_v = self.kernel self.weight_g = self.add_weight(name='weight_g', shape=(int(self.weight_v.shape[self.filter_axis]), 1, 1), initializer='ones', dtype=self.weight_v.dtype, trainable=True) self._init_norm() self.bias = self.add_weight(name='bias', shape=(self.filters,), initializer='zeros', trainable=True) def call(self, inputs): self._normalize_kernel() padded_inputs = tf.pad(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0))) output = super().call(padded_inputs) return output class TFHubertNoLayerNormConvLayer(keras.layers.Layer): def __init__(self, config: HubertConfig, layer_id: int=0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = keras.layers.Conv1D(filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name='conv') self.activation = get_tf_activation(config.feat_extract_activation) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'conv', None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.in_conv_dim]) class TFHubertLayerNormConvLayer(keras.layers.Layer): def __init__(self, config: HubertConfig, layer_id: 
int=0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = keras.layers.Conv1D(filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name='conv') self.layer_norm = keras.layers.LayerNormalization(name='layer_norm', epsilon=config.layer_norm_eps) self.activation = get_tf_activation(config.feat_extract_activation) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'conv', None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.in_conv_dim]) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.out_conv_dim]) class TFHubertGroupNormConvLayer(keras.layers.Layer): def __init__(self, config: HubertConfig, layer_id: int=0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = keras.layers.Conv1D(filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name='conv') self.activation = get_tf_activation(config.feat_extract_activation) self.layer_norm = TFHubertGroupNorm(groups=self.out_conv_dim, epsilon=config.layer_norm_eps, name='layer_norm') def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'conv', None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.in_conv_dim]) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.out_conv_dim]) class TFHubertPositionalConvEmbedding(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs: Any) -> None: super().__init__(**kwargs) self.conv = TFHubertWeightNormConv1D(filters=config.hidden_size, kernel_size=config.num_conv_pos_embeddings, groups=config.num_conv_pos_embedding_groups, explicit_padding=config.num_conv_pos_embeddings // 2, name='conv') self.padding = TFHubertSamePadLayer(config.num_conv_pos_embeddings) self.activation = get_tf_activation(config.feat_extract_activation) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'conv', None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, self.config.hidden_size]) class TFHubertSamePadLayer(keras.layers.Layer): def __init__(self, num_conv_pos_embeddings, **kwargs): super().__init__(**kwargs) self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def call(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :-self.num_pad_remove, :] return 
hidden_states class TFHubertFeatureEncoder(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs: Any) -> None: super().__init__(**kwargs) if config.feat_extract_norm == 'group': conv_layers = [TFHubertGroupNormConvLayer(config, layer_id=0, name=f'conv_layers.{0}')] + [TFHubertNoLayerNormConvLayer(config, layer_id=i + 1, name=f'conv_layers.{i + 1}') for i in range(config.num_feat_extract_layers - 1)] elif config.feat_extract_norm == 'layer': conv_layers = [TFHubertLayerNormConvLayer(config, layer_id=i, name=f'conv_layers.{i}') for i in range(config.num_feat_extract_layers)] else: raise ValueError(f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']") self.conv_layers = conv_layers def call(self, input_values): hidden_states = tf.expand_dims(input_values, -1) for conv_layer in self.conv_layers: hidden_states = conv_layer(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True for conv_layer in self.conv_layers: with tf.name_scope(conv_layer.name): conv_layer.build(None) class TFHubertFeatureExtractor(TFHubertFeatureEncoder): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) warnings.warn(f'The class `{self.__class__.__name__}` has been deprecated and will be removed in Transformers v5. Use `{self.__class__.__bases__[0].__name__}` instead.', FutureWarning) class TFHubertFeatureProjection(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm') self.projection = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='projection') self.dropout = keras.layers.Dropout(rate=config.feat_proj_dropout) self.config = config def call(self, hidden_states: tf.Tensor, training: bool=False) -> tf.Tensor: hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(hidden_states) hidden_states = self.dropout(hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.conv_dim[-1]]) if getattr(self, 'projection', None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, self.config.conv_dim[-1]]) class TFHubertAttention(keras.layers.Layer): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, **kwargs): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='k_proj') self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='q_proj') self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='v_proj') self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name='out_proj') def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, 
self.head_dim)), (0, 2, 1, 3)) def call(self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None=None, past_key_value: Tuple[Tuple[tf.Tensor]] | None=None, attention_mask: tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor | None]: is_cross_attention = key_value_states is not None (bsz, tgt_len, embed_dim) = shape_list(hidden_states) query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None: key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal(shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}') if attention_mask is not None: tf.debugging.assert_equal(shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}') attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal(shape_list(layer_head_mask), [self.num_heads], message=f'Head mask for a single layer should be of size {self.num_heads}, but is {shape_list(layer_head_mask)}') attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal(shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}') attn_output = tf.transpose(tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return (attn_output, attn_weights, past_key_value) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'k_proj', None) 
is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, 'q_proj', None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, 'v_proj', None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, 'out_proj', None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) class TFHubertFeedForward(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.intermediate_dropout = keras.layers.Dropout(config.activation_dropout) self.intermediate_dense = keras.layers.Dense(units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='intermediate_dense') self.intermediate_act_fn = get_tf_activation(config.hidden_act) self.output_dense = keras.layers.Dense(units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='output_dense') self.output_dropout = keras.layers.Dropout(config.hidden_dropout) self.config = config def call(self, hidden_states: tf.Tensor, training: bool=False) -> tf.Tensor: hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states, training=training) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'intermediate_dense', None) is not None: with tf.name_scope(self.intermediate_dense.name): self.intermediate_dense.build([None, None, self.config.hidden_size]) if getattr(self, 'output_dense', None) is not None: with tf.name_scope(self.output_dense.name): self.output_dense.build([None, None, self.config.intermediate_size]) class TFHubertEncoderLayer(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.attention = TFHubertAttention(embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, name='attention') self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm') self.feed_forward = TFHubertFeedForward(config, name='feed_forward') self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='final_layer_norm') self.config = config def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=False, training: bool=False) -> Tuple[tf.Tensor]: attn_residual = hidden_states (hidden_states, attn_weights, _) = self.attention(hidden_states, attention_mask=attention_mask, training=training) hidden_states = self.dropout(hidden_states, training=training) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'attention', None) is not None: with tf.name_scope(self.attention.name): 
self.attention.build(None) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, 'feed_forward', None) is not None: with tf.name_scope(self.feed_forward.name): self.feed_forward.build(None) if getattr(self, 'final_layer_norm', None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.config.hidden_size]) class TFHubertEncoderLayerStableLayerNorm(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.attention = TFHubertAttention(embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, name='attention') self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm') self.feed_forward = TFHubertFeedForward(config, name='feed_forward') self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='final_layer_norm') self.config = config def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=False, training: bool=False) -> Tuple[tf.Tensor]: attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) (hidden_states, attn_weights, _) = self.attention(hidden_states, attention_mask=attention_mask, training=training) hidden_states = self.dropout(hidden_states, training=training) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'attention', None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, 'feed_forward', None) is not None: with tf.name_scope(self.feed_forward.name): self.feed_forward.build(None) if getattr(self, 'final_layer_norm', None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.config.hidden_size]) class TFHubertEncoder(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name='pos_conv_embed') self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm') self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer = [TFHubertEncoderLayer(config, name=f'layers.{i}') for i in range(config.num_hidden_layers)] def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, training: Optional[bool]=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: hidden_states = hidden_states * tf.expand_dims(attention_mask, -1) attention_mask = _expand_mask(attention_mask) else: attention_mask = None position_embeddings = self.pos_conv_embed(hidden_states) 
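# --- editor's illustrative sketch (standalone; not part of modeling_tf_hubert) ---
# The encoder call above first zeroes padded frames with
# `hidden_states * tf.expand_dims(attention_mask, -1)` and then converts the 2-D padding
# mask into an additive bias via `_expand_mask` (defined elsewhere in this module).
# The helper below is an assumed, simplified equivalent: 0.0 where attention is allowed
# and a large negative value where it is masked, shaped to broadcast against attention
# scores of shape (batch, num_heads, tgt_len, src_len).
import tensorflow as tf

_SKETCH_LARGE_NEGATIVE = -1e8  # assumed constant; only needs to swamp the softmax

def expand_mask_sketch(mask: tf.Tensor) -> tf.Tensor:
    # mask: (batch, src_len), 1 for real frames, 0 for padding
    mask = tf.cast(mask, tf.float32)
    src_len = tf.shape(mask)[1]
    expanded = tf.tile(mask[:, tf.newaxis, tf.newaxis, :], tf.stack([1, 1, src_len, 1]))
    return (1.0 - expanded) * _SKETCH_LARGE_NEGATIVE  # (batch, 1, src_len, src_len)

# example: the last frame of a 4-frame sequence is padding
_bias = expand_mask_sketch(tf.constant([[1, 1, 1, 0]]))
# _bias[0, 0, :, 3] is very negative, so softmax assigns that frame ~zero weight
# --- end of sketch ---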
hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states, training=training) for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = np.random.uniform(0, 1) if training and dropout_probability < self.config.layerdrop: continue layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, training=training) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'pos_conv_embed', None) is not None: with tf.name_scope(self.pos_conv_embed.name): self.pos_conv_embed.build(None) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, 'layer', None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) class TFHubertEncoderStableLayerNorm(keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name='pos_conv_embed') self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm') self.dropout = keras.layers.Dropout(config.hidden_dropout) self.layer = [TFHubertEncoderLayerStableLayerNorm(config, name=f'layers.{i}') for i in range(config.num_hidden_layers)] def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, training: Optional[bool]=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: hidden_states = hidden_states * tf.expand_dims(attention_mask, -1) attention_mask = _expand_mask(attention_mask) else: attention_mask = None position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states, training=training) for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = np.random.uniform(0, 1) if training and dropout_probability < self.config.layerdrop: continue layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, training=training) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, 
hidden_states=all_hidden_states, attentions=all_self_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'pos_conv_embed', None) is not None: with tf.name_scope(self.pos_conv_embed.name): self.pos_conv_embed.build(None) if getattr(self, 'layer_norm', None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, 'layer', None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFHubertMainLayer(keras.layers.Layer): config_class = HubertConfig def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.feature_extractor = TFHubertFeatureEncoder(config, name='feature_extractor') self.feature_projection = TFHubertFeatureProjection(config, name='feature_projection') if config.do_stable_layer_norm: self.encoder = TFHubertEncoderStableLayerNorm(config, name='encoder') else: self.encoder = TFHubertEncoder(config, name='encoder') def build(self, input_shape=None): self.masked_spec_embed = self.add_weight(shape=(self.config.hidden_size,), initializer='uniform', trainable=True, name='masked_spec_embed') if self.built: return self.built = True if getattr(self, 'feature_extractor', None) is not None: with tf.name_scope(self.feature_extractor.name): self.feature_extractor.build(None) if getattr(self, 'feature_projection', None) is not None: with tf.name_scope(self.feature_projection.name): self.feature_projection.build(None) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor): def _conv_out_length(input_length, kernel_size, stride): return (input_length - kernel_size) // stride + 1 for (kernel_size, stride) in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _mask_hidden_states(self, hidden_states: tf.Tensor, mask_time_indices: tf.Tensor | None=None): (batch_size, sequence_length, hidden_size) = shape_list(hidden_states) if not getattr(self.config, 'apply_spec_augment', True): return hidden_states if mask_time_indices is not None: hidden_states = tf.where(tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool), self.masked_spec_embed[tf.newaxis, tf.newaxis, :], hidden_states) elif self.config.mask_time_prob > 0: mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, min_masks=2) hidden_states = tf.where(tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool), self.masked_spec_embed[tf.newaxis, tf.newaxis, :], hidden_states) if self.config.mask_feature_prob > 0: mask_feature_indices = _compute_mask_indices((batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length) hidden_states = tf.where(mask_feature_indices[:, tf.newaxis, :], hidden_states, 0) return hidden_states @unpack_inputs def call(self, input_values: tf.Tensor, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: tf.Tensor | None=None, output_hidden_states: tf.Tensor | None=None, return_dict: Optional[bool]=None, training: bool=False, **kwargs: Any): hidden_states = 
self.feature_extractor(tf.cast(input_values, tf.float32), training=training) if attention_mask is not None: output_lengths = self._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, -1)) attention_mask = tf.sequence_mask(output_lengths, maxlen=shape_list(hidden_states)[1], dtype=hidden_states.dtype) hidden_states = self.feature_projection(hidden_states, training=training) mask_time_indices = kwargs.get('mask_time_indices', None) if training: hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices) encoder_outputs = self.encoder(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states,) + encoder_outputs[1:] return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) class TFHubertPreTrainedModel(TFPreTrainedModel): config_class = HubertConfig base_model_prefix = 'hubert' main_input_name = 'input_values' @property def input_signature(self): return {'input_values': tf.TensorSpec((None, 16000), tf.float32, name='input_values'), 'attention_mask': tf.TensorSpec((None, None), tf.int32, name='attention_mask'), 'token_type_ids': tf.TensorSpec((None, None), tf.int32, name='token_type_ids')} def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) logger.warning(f'\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish to train/fine-tune this model, you need a GPU or a TPU') HUBERT_START_DOCSTRING = '\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_values` only and nothing else: `model(input_values)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({"input_values": input_values, "token_type_ids": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don\'t need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Args:\n config ([`HubertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' HUBERT_INPUTS_DOCSTRING = "\n Args:\n input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_values` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. 
This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n" @add_start_docstrings('The bare TFHubert Model transformer outputting raw hidden-states without any specific head on top.', HUBERT_START_DOCSTRING) class TFHubertModel(TFHubertPreTrainedModel): def __init__(self, config: HubertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.hubert = TFHubertMainLayer(config, name='hubert') @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call(self, input_values: tf.Tensor, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: output_hidden_states = output_hidden_states if output_hidden_states else self.config.output_hidden_states output_attentions = output_attentions if output_attentions else self.config.output_attentions return_dict = return_dict if return_dict else self.config.return_dict outputs = self.hubert(input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'hubert', None) is not None: with tf.name_scope(self.hubert.name): self.hubert.build(None) @add_start_docstrings('TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).', HUBERT_START_DOCSTRING) class TFHubertForCTC(TFHubertPreTrainedModel): def __init__(self, config: HubertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.hubert = TFHubertMainLayer(config, name='hubert') self.dropout = keras.layers.Dropout(config.final_dropout) self.lm_head = keras.layers.Dense(config.vocab_size, name='lm_head') self.output_hidden_size = config.output_hidden_size if hasattr(config, 'add_adapter') and config.add_adapter else config.hidden_size def freeze_feature_extractor(self): warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. 
Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning) self.freeze_feature_encoder() def freeze_feature_encoder(self): self.hubert.feature_extractor.trainable = False @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call(self, input_values: tf.Tensor, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, labels: tf.Tensor | None=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]: if labels is not None and tf.reduce_max(labels) >= self.config.vocab_size: raise ValueError(f'Label values must be <= vocab_size: {self.config.vocab_size}') outputs = self.hubert(input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, training=training) logits = self.lm_head(hidden_states) if labels is not None: attention_mask = attention_mask if attention_mask is not None else tf.ones_like(input_values, dtype=tf.float32) input_lengths = self.hubert._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, axis=-1)) labels_mask = tf.cast(labels >= 0, tf.int32) target_lengths = tf.reduce_sum(labels_mask, axis=-1) loss = tf.nn.ctc_loss(logits=logits, labels=labels, logit_length=input_lengths, label_length=target_lengths, blank_index=self.config.pad_token_id, logits_time_major=False) if self.config.ctc_loss_reduction == 'sum': loss = tf.reduce_sum(loss) loss = tf.reshape(loss, (1,)) if self.config.ctc_loss_reduction == 'mean': loss = tf.reduce_mean(loss) loss = tf.reshape(loss, (1,)) else: loss = None if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TFCausalLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'hubert', None) is not None: with tf.name_scope(self.hubert.name): self.hubert.build(None) if getattr(self, 'lm_head', None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build([None, None, self.output_hidden_size]) # File: transformers-main/src/transformers/models/ibert/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_ibert': ['IBertConfig', 'IBertOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_ibert'] = ['IBertForMaskedLM', 'IBertForMultipleChoice', 'IBertForQuestionAnswering', 'IBertForSequenceClassification', 'IBertForTokenClassification', 'IBertModel', 'IBertPreTrainedModel'] if TYPE_CHECKING: from .configuration_ibert import IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import IBertForMaskedLM, 
IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/ibert/configuration_ibert.py """""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class IBertConfig(PretrainedConfig): model_type = 'ibert' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', quant_mode=False, force_dequant='none', **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.quant_mode = quant_mode self.force_dequant = force_dequant class IBertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)]) # File: transformers-main/src/transformers/models/ibert/modeling_ibert.py """""" import math from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import gelu from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_ibert import IBertConfig from .quant_modules import IntGELU, IntLayerNorm, IntSoftmax, QuantAct, QuantEmbedding, QuantLinear logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'kssteven/ibert-roberta-base' _CONFIG_FOR_DOC = 'IBertConfig' class IBertEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.embedding_bit = 8 self.embedding_act_bit = 16 self.act_bit = 8 self.ln_input_bit = 22 self.ln_output_bit = 32 self.word_embeddings = QuantEmbedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id, weight_bit=self.embedding_bit, 
quant_mode=self.quant_mode) self.token_type_embeddings = QuantEmbedding(config.type_vocab_size, config.hidden_size, weight_bit=self.embedding_bit, quant_mode=self.quant_mode) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.padding_idx = config.pad_token_id self.position_embeddings = QuantEmbedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx, weight_bit=self.embedding_bit, quant_mode=self.quant_mode) self.embeddings_act1 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode) self.embeddings_act2 = QuantAct(self.embedding_act_bit, quant_mode=self.quant_mode) self.LayerNorm = IntLayerNorm(config.hidden_size, eps=config.layer_norm_eps, output_bit=self.ln_output_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant) self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if position_ids is None: if input_ids is not None: position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: (inputs_embeds, inputs_embeds_scaling_factor) = self.word_embeddings(input_ids) else: inputs_embeds_scaling_factor = None (token_type_embeddings, token_type_embeddings_scaling_factor) = self.token_type_embeddings(token_type_ids) (embeddings, embeddings_scaling_factor) = self.embeddings_act1(inputs_embeds, inputs_embeds_scaling_factor, identity=token_type_embeddings, identity_scaling_factor=token_type_embeddings_scaling_factor) if self.position_embedding_type == 'absolute': (position_embeddings, position_embeddings_scaling_factor) = self.position_embeddings(position_ids) (embeddings, embeddings_scaling_factor) = self.embeddings_act1(embeddings, embeddings_scaling_factor, identity=position_embeddings, identity_scaling_factor=position_embeddings_scaling_factor) (embeddings, embeddings_scaling_factor) = self.LayerNorm(embeddings, embeddings_scaling_factor) embeddings = self.dropout(embeddings) (embeddings, embeddings_scaling_factor) = self.output_activation(embeddings, embeddings_scaling_factor) return (embeddings, embeddings_scaling_factor) def create_position_ids_from_inputs_embeds(self, inputs_embeds): input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device) return position_ids.unsqueeze(0).expand(input_shape) class IBertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.quant_mode = config.quant_mode self.weight_bit = 8 self.bias_bit = 32 self.act_bit = 8 self.num_attention_heads = config.num_attention_heads 
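# --- editor's illustrative sketch (standalone; not part of modeling_ibert) ---
# IBertEmbeddings above builds position ids RoBERTa-style: real tokens are numbered
# starting at `padding_idx + 1` while padding positions keep `padding_idx`, so padded
# slots never consume a position embedding. The helper below is an assumed minimal
# equivalent of the `create_position_ids_from_input_ids` call used in its forward pass.
import torch

def position_ids_sketch(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()          # 1 for real tokens, 0 for padding
    incremental = torch.cumsum(mask, dim=1) * mask  # running count, zeroed on padding
    return incremental.long() + padding_idx         # ids start at padding_idx + 1

# example with padding_idx=1: tokens [5, 7, 9, 1, 1] -> positions [2, 3, 4, 1, 1]
assert position_ids_sketch(torch.tensor([[5, 7, 9, 1, 1]]), padding_idx=1).tolist() == [[2, 3, 4, 1, 1]]
# --- end of sketch ---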
self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = QuantLinear(config.hidden_size, self.all_head_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True) self.key = QuantLinear(config.hidden_size, self.all_head_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True) self.value = QuantLinear(config.hidden_size, self.all_head_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True) self.query_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.key_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.value_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type != 'absolute': raise ValueError("I-BERT only supports 'absolute' for `config.position_embedding_type`") self.softmax = IntSoftmax(self.act_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False): (mixed_query_layer, mixed_query_layer_scaling_factor) = self.query(hidden_states, hidden_states_scaling_factor) (mixed_key_layer, mixed_key_layer_scaling_factor) = self.key(hidden_states, hidden_states_scaling_factor) (mixed_value_layer, mixed_value_layer_scaling_factor) = self.value(hidden_states, hidden_states_scaling_factor) (query_layer, query_layer_scaling_factor) = self.query_activation(mixed_query_layer, mixed_query_layer_scaling_factor) (key_layer, key_layer_scaling_factor) = self.key_activation(mixed_key_layer, mixed_key_layer_scaling_factor) (value_layer, value_layer_scaling_factor) = self.value_activation(mixed_value_layer, mixed_value_layer_scaling_factor) query_layer = self.transpose_for_scores(query_layer) key_layer = self.transpose_for_scores(key_layer) value_layer = self.transpose_for_scores(value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) scale = math.sqrt(self.attention_head_size) attention_scores = attention_scores / scale if self.quant_mode: attention_scores_scaling_factor = query_layer_scaling_factor * key_layer_scaling_factor / scale else: attention_scores_scaling_factor = None if attention_mask is not None: attention_scores = attention_scores + attention_mask (attention_probs, attention_probs_scaling_factor) = self.softmax(attention_scores, attention_scores_scaling_factor) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) if attention_probs_scaling_factor is not None: context_layer_scaling_factor = attention_probs_scaling_factor * value_layer_scaling_factor else: context_layer_scaling_factor = None context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = 
context_layer.view(*new_context_layer_shape) (context_layer, context_layer_scaling_factor) = self.output_activation(context_layer, context_layer_scaling_factor) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) output_scaling_factor = (context_layer_scaling_factor, attention_probs_scaling_factor) if output_attentions else (context_layer_scaling_factor,) return (outputs, output_scaling_factor) class IBertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.act_bit = 8 self.weight_bit = 8 self.bias_bit = 32 self.ln_input_bit = 22 self.ln_output_bit = 32 self.dense = QuantLinear(config.hidden_size, config.hidden_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True) self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode) self.LayerNorm = IntLayerNorm(config.hidden_size, eps=config.layer_norm_eps, output_bit=self.ln_output_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant) self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor): (hidden_states, hidden_states_scaling_factor) = self.dense(hidden_states, hidden_states_scaling_factor) hidden_states = self.dropout(hidden_states) (hidden_states, hidden_states_scaling_factor) = self.ln_input_act(hidden_states, hidden_states_scaling_factor, identity=input_tensor, identity_scaling_factor=input_tensor_scaling_factor) (hidden_states, hidden_states_scaling_factor) = self.LayerNorm(hidden_states, hidden_states_scaling_factor) (hidden_states, hidden_states_scaling_factor) = self.output_activation(hidden_states, hidden_states_scaling_factor) return (hidden_states, hidden_states_scaling_factor) class IBertAttention(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.self = IBertSelfAttention(config) self.output = IBertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False): (self_outputs, self_outputs_scaling_factor) = self.self(hidden_states, hidden_states_scaling_factor, attention_mask, head_mask, output_attentions) (attention_output, attention_output_scaling_factor) = self.output(self_outputs[0], self_outputs_scaling_factor[0], hidden_states, hidden_states_scaling_factor) outputs = (attention_output,) + self_outputs[1:] outputs_scaling_factor = (attention_output_scaling_factor,) + self_outputs_scaling_factor[1:] return (outputs, outputs_scaling_factor) class IBertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.act_bit = 8 
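# --- editor's illustrative sketch (standalone; not part of modeling_ibert) ---
# How the scaling factors in IBertSelfAttention above propagate: with Q = Q_int * s_q
# and K = K_int * s_k, the scores Q @ K^T / sqrt(d) equal (Q_int @ K_int^T) scaled by
# s_q * s_k / sqrt(d), which is the `attention_scores_scaling_factor` tracked in quant
# mode. All names and values below are made up for the demonstration.
import math
import torch

s_q, s_k, d = 0.02, 0.03, 64
q_int = torch.randint(-128, 128, (4, d)).float()   # integer-valued "quantized" queries
k_int = torch.randint(-128, 128, (4, d)).float()   # integer-valued "quantized" keys

real_scores = (q_int * s_q) @ (k_int * s_k).T / math.sqrt(d)
int_scores = q_int @ k_int.T                       # integer-only matmul
score_scale = s_q * s_k / math.sqrt(d)             # scaling factor carried on the side
assert torch.allclose(real_scores, int_scores * score_scale)
# --- end of sketch ---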
self.weight_bit = 8 self.bias_bit = 32 self.dense = QuantLinear(config.hidden_size, config.intermediate_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True) if config.hidden_act != 'gelu': raise ValueError("I-BERT only supports 'gelu' for `config.hidden_act`") self.intermediate_act_fn = IntGELU(quant_mode=self.quant_mode, force_dequant=config.force_dequant) self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) def forward(self, hidden_states, hidden_states_scaling_factor): (hidden_states, hidden_states_scaling_factor) = self.dense(hidden_states, hidden_states_scaling_factor) (hidden_states, hidden_states_scaling_factor) = self.intermediate_act_fn(hidden_states, hidden_states_scaling_factor) (hidden_states, hidden_states_scaling_factor) = self.output_activation(hidden_states, hidden_states_scaling_factor) return (hidden_states, hidden_states_scaling_factor) class IBertOutput(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.act_bit = 8 self.weight_bit = 8 self.bias_bit = 32 self.ln_input_bit = 22 self.ln_output_bit = 32 self.dense = QuantLinear(config.intermediate_size, config.hidden_size, bias=True, weight_bit=self.weight_bit, bias_bit=self.bias_bit, quant_mode=self.quant_mode, per_channel=True) self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode) self.LayerNorm = IntLayerNorm(config.hidden_size, eps=config.layer_norm_eps, output_bit=self.ln_output_bit, quant_mode=self.quant_mode, force_dequant=config.force_dequant) self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor): (hidden_states, hidden_states_scaling_factor) = self.dense(hidden_states, hidden_states_scaling_factor) hidden_states = self.dropout(hidden_states) (hidden_states, hidden_states_scaling_factor) = self.ln_input_act(hidden_states, hidden_states_scaling_factor, identity=input_tensor, identity_scaling_factor=input_tensor_scaling_factor) (hidden_states, hidden_states_scaling_factor) = self.LayerNorm(hidden_states, hidden_states_scaling_factor) (hidden_states, hidden_states_scaling_factor) = self.output_activation(hidden_states, hidden_states_scaling_factor) return (hidden_states, hidden_states_scaling_factor) class IBertLayer(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.act_bit = 8 self.seq_len_dim = 1 self.attention = IBertAttention(config) self.intermediate = IBertIntermediate(config) self.output = IBertOutput(config) self.pre_intermediate_act = QuantAct(self.act_bit, quant_mode=self.quant_mode) self.pre_output_act = QuantAct(self.act_bit, quant_mode=self.quant_mode) def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False): (self_attention_outputs, self_attention_outputs_scaling_factor) = self.attention(hidden_states, hidden_states_scaling_factor, attention_mask, head_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] attention_output_scaling_factor = self_attention_outputs_scaling_factor[0] outputs = self_attention_outputs[1:] (layer_output, layer_output_scaling_factor) = self.feed_forward_chunk(attention_output, attention_output_scaling_factor) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, 
attention_output, attention_output_scaling_factor): (attention_output, attention_output_scaling_factor) = self.pre_intermediate_act(attention_output, attention_output_scaling_factor) (intermediate_output, intermediate_output_scaling_factor) = self.intermediate(attention_output, attention_output_scaling_factor) (intermediate_output, intermediate_output_scaling_factor) = self.pre_output_act(intermediate_output, intermediate_output_scaling_factor) (layer_output, layer_output_scaling_factor) = self.output(intermediate_output, intermediate_output_scaling_factor, attention_output, attention_output_scaling_factor) return (layer_output, layer_output_scaling_factor) class IBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.quant_mode = config.quant_mode self.layer = nn.ModuleList([IBertLayer(config) for _ in range(config.num_hidden_layers)]) def forward(self, hidden_states, hidden_states_scaling_factor, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = None next_decoder_cache = None for (i, layer_module) in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, hidden_states_scaling_factor, attention_mask, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class IBertPooler(nn.Module): def __init__(self, config): super().__init__() self.quant_mode = config.quant_mode self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class IBertPreTrainedModel(PreTrainedModel): config_class = IBertConfig base_model_prefix = 'ibert' def _init_weights(self, module): if isinstance(module, (QuantLinear, nn.Linear)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (QuantEmbedding, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, (IntLayerNorm, nn.LayerNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) def resize_token_embeddings(self, new_num_tokens=None): raise NotImplementedError('`resize_token_embeddings` is not supported for I-BERT.') IBERT_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`IBertConfig`]): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' IBERT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare I-BERT Model transformer outputting raw hidden-states without any specific head on top.', IBERT_START_DOCSTRING) class IBertModel(IBertPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.quant_mode = config.quant_mode self.embeddings = IBertEmbeddings(config) self.encoder = IBertEncoder(config) self.pooler = IBertPooler(config) if add_pooling_layer else None self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, Tuple[torch.FloatTensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') (batch_size, seq_length) = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) (embedding_output, embedding_output_scaling_factor) = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, embedding_output_scaling_factor, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] 
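# --- Illustrative sketch, not part of the original module: a minimal way to exercise the
# bare IBertModel defined above from a freshly initialized config. The constructor arguments
# shown (vocab_size, hidden_size, ...) are the standard BERT-style fields that IBertConfig is
# assumed to accept; the sizes are placeholders chosen to keep the example tiny.
import torch
from transformers import IBertConfig, IBertModel

tiny_config = IBertConfig(
    vocab_size=100,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=64,
    quant_mode=False,  # full-precision path; True switches to the integer-only kernels
)
tiny_model = IBertModel(tiny_config)
example_input_ids = torch.randint(5, tiny_config.vocab_size, (1, 8))  # avoid the padding id
with torch.no_grad():
    example_outputs = tiny_model(example_input_ids)
print(example_outputs.last_hidden_state.shape)  # expected: torch.Size([1, 8, 32])
# --- end sketch ---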
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('I-BERT Model with a `language modeling` head on top.', IBERT_START_DOCSTRING) class IBertForMaskedLM(IBertPreTrainedModel): _tied_weights_keys = ['lm_head.decoder.bias', 'lm_head.decoder.weight'] def __init__(self, config): super().__init__(config) self.ibert = IBertModel(config, add_pooling_layer=False) self.lm_head = IBertLMHead(config) self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings self.lm_head.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask='') def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ibert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return (masked_lm_loss,) + output if masked_lm_loss is not None else output return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class IBertLMHead(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) x = self.decoder(x) return x def _tie_weights(self) -> None: if self.decoder.bias.device.type == 'meta': self.decoder.bias = self.bias else: self.bias = self.decoder.bias @add_start_docstrings('\n I-BERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. 
for GLUE tasks.\n ', IBERT_START_DOCSTRING) class IBertForSequenceClassification(IBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.ibert = IBertModel(config, add_pooling_layer=False) self.classifier = IBertClassificationHead(config) self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ibert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n I-BERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n ', IBERT_START_DOCSTRING) class IBertForMultipleChoice(IBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.ibert = IBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MultipleChoiceModelOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.ibert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings('\n I-BERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n ', IBERT_START_DOCSTRING) class IBertForTokenClassification(IBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.ibert = IBertModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ibert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) class IBertClassificationHead(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): hidden_states = features[:, 0, :] hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states @add_start_docstrings('\n I-BERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ', IBERT_START_DOCSTRING) class IBertForQuestionAnswering(IBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.ibert = IBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length')) @add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: 
Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[QuestionAnsweringModelOutput, Tuple[torch.FloatTensor]]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.ibert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) (start_logits, end_logits) = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx # File: transformers-main/src/transformers/models/ibert/quant_modules.py import decimal import numpy as np import torch from torch import nn from torch.autograd import Function from ...utils import logging logger = logging.get_logger(__name__) class QuantEmbedding(nn.Module): def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, weight_bit=8, momentum=0.95, quant_mode=False): super().__init__() self.num_ = num_embeddings self.dim = embedding_dim self.padding_idx = padding_idx self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq self.sparse = sparse self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim])) self.register_buffer('weight_scaling_factor', torch.zeros(1)) self.register_buffer('weight_integer', torch.zeros_like(self.weight)) self.weight_bit = weight_bit self.momentum = momentum self.quant_mode = quant_mode self.percentile_mode = False self.weight_function = SymmetricQuantFunction.apply def forward(self, x, positions=None, incremental_state=None): if not self.quant_mode: return (nn.functional.embedding(x, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse), None) w = self.weight w_transform = w.data.detach() w_min = w_transform.min().expand(1) w_max = 
w_transform.max().expand(1) self.weight_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, False) self.weight_integer = self.weight_function(self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor) emb_int = nn.functional.embedding(x, self.weight_integer, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse) return (emb_int * self.weight_scaling_factor, self.weight_scaling_factor) class QuantAct(nn.Module): def __init__(self, activation_bit, act_range_momentum=0.95, per_channel=False, channel_len=None, quant_mode=False): super().__init__() self.activation_bit = activation_bit self.act_range_momentum = act_range_momentum self.quant_mode = quant_mode self.per_channel = per_channel self.percentile = False self.act_function = SymmetricQuantFunction.apply if not self.per_channel: self.register_buffer('x_min', torch.zeros(1)) self.register_buffer('x_max', torch.zeros(1)) self.register_buffer('act_scaling_factor', torch.zeros(1)) self.x_min -= 1e-05 self.x_max += 1e-05 else: raise NotImplementedError('per-channel mode is not currently supported for activation.') def __repr__(self): return f'{self.__class__.__name__}(activation_bit={self.activation_bit}, quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, Act_max: {self.x_max.item():.2f})' def forward(self, x, pre_act_scaling_factor=None, identity=None, identity_scaling_factor=None, specified_min=None, specified_max=None): x_act = x if identity is None else identity + x if self.training: assert not self.percentile, 'percentile mode is not currently supported for activation.' assert not self.per_channel, 'per-channel mode is not currently supported for activation.' x_min = x_act.data.min() x_max = x_act.data.max() assert x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0, 'NaN detected when computing min/max of the activation' if self.x_min.min() > -1.1e-05 and self.x_max.max() < 1.1e-05: self.x_min = self.x_min + x_min self.x_max = self.x_max + x_max elif self.act_range_momentum == -1: self.x_min = torch.min(self.x_min, x_min) self.x_max = torch.max(self.x_max, x_max) else: self.x_min = self.x_min * self.act_range_momentum + x_min * (1 - self.act_range_momentum) self.x_max = self.x_max * self.act_range_momentum + x_max * (1 - self.act_range_momentum) if not self.quant_mode: return (x_act, None) x_min = self.x_min if specified_min is None else specified_min x_max = self.x_max if specified_max is None else specified_max self.act_scaling_factor = symmetric_linear_quantization_params(self.activation_bit, x_min, x_max, per_channel=self.per_channel) if pre_act_scaling_factor is None: quant_act_int = self.act_function(x, self.activation_bit, self.percentile, self.act_scaling_factor) else: quant_act_int = FixedPointMul.apply(x, pre_act_scaling_factor, self.activation_bit, self.act_scaling_factor, identity, identity_scaling_factor) correct_output_scale = self.act_scaling_factor.view(-1) return (quant_act_int * correct_output_scale, self.act_scaling_factor) class QuantLinear(nn.Module): def __init__(self, in_features, out_features, bias=True, weight_bit=8, bias_bit=32, per_channel=False, quant_mode=False): super().__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.zeros([out_features, in_features])) self.register_buffer('weight_integer', torch.zeros_like(self.weight)) self.register_buffer('fc_scaling_factor', torch.zeros(self.out_features)) if bias: self.bias = 
nn.Parameter(torch.zeros(out_features)) self.register_buffer('bias_integer', torch.zeros_like(self.bias)) self.weight_bit = weight_bit self.quant_mode = quant_mode self.per_channel = per_channel self.bias_bit = bias_bit self.quant_mode = quant_mode self.percentile_mode = False self.weight_function = SymmetricQuantFunction.apply def __repr__(self): s = super().__repr__() s = f'({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})' return s def forward(self, x, prev_act_scaling_factor=None): if not self.quant_mode: return (nn.functional.linear(x, weight=self.weight, bias=self.bias), None) assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), 'Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. Please add a QuantAct layer with `per_channel = True` before this QuantAct layer' w = self.weight w_transform = w.data.detach() if self.per_channel: (w_min, _) = torch.min(w_transform, dim=1, out=None) (w_max, _) = torch.max(w_transform, dim=1, out=None) else: w_min = w_transform.min().expand(1) w_max = w_transform.max().expand(1) self.fc_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, self.per_channel) self.weight_integer = self.weight_function(self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor) bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor if self.bias is not None: self.bias_integer = self.weight_function(self.bias, self.bias_bit, False, bias_scaling_factor) prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1) x_int = x / prev_act_scaling_factor return (nn.functional.linear(x_int, weight=self.weight_integer, bias=self.bias_integer) * bias_scaling_factor, bias_scaling_factor) class IntGELU(nn.Module): def __init__(self, quant_mode=True, force_dequant='none'): super().__init__() self.quant_mode = quant_mode if force_dequant in ['nonlinear', 'gelu']: logger.info('Force dequantize gelu') self.quant_mode = False if not self.quant_mode: self.activation_fn = nn.GELU() self.k = 1.4142 self.const = 14 self.coeff = [-0.2888, -1.769, 1] self.coeff[2] /= self.coeff[0] def int_erf(self, x_int, scaling_factor): b_int = torch.floor(self.coeff[1] / scaling_factor) c_int = torch.floor(self.coeff[2] / scaling_factor ** 2) sign = torch.sign(x_int) abs_int = torch.min(torch.abs(x_int), -b_int) y_int = sign * ((abs_int + b_int) ** 2 + c_int) scaling_factor = scaling_factor ** 2 * self.coeff[0] y_int = floor_ste.apply(y_int / 2 ** self.const) scaling_factor = scaling_factor * 2 ** self.const return (y_int, scaling_factor) def forward(self, x, scaling_factor=None): if not self.quant_mode: return (self.activation_fn(x), None) x_int = x / scaling_factor (sigmoid_int, sigmoid_scaling_factor) = self.int_erf(x_int, scaling_factor / self.k) shift_int = 1.0 // sigmoid_scaling_factor x_int = x_int * (sigmoid_int + shift_int) scaling_factor = scaling_factor * sigmoid_scaling_factor / 2 return (x_int * scaling_factor, scaling_factor) class IntSoftmax(nn.Module): def __init__(self, output_bit, quant_mode=False, force_dequant='none'): super().__init__() self.output_bit = output_bit self.max_bit = 32 self.quant_mode = quant_mode if force_dequant in ['nonlinear', 'softmax']: logger.info('Force dequantize softmax') self.quant_mode = False self.act = QuantAct(16, quant_mode=self.quant_mode) self.x0 = -0.6931 self.const = 30 self.coef = [0.35815147, 0.96963238, 1.0] self.coef[1] /= self.coef[0] self.coef[2] /= self.coef[0] def int_polynomial(self, x_int, 
scaling_factor): with torch.no_grad(): b_int = torch.floor(self.coef[1] / scaling_factor) c_int = torch.floor(self.coef[2] / scaling_factor ** 2) z = (x_int + b_int) * x_int + c_int scaling_factor = self.coef[0] * scaling_factor ** 2 return (z, scaling_factor) def int_exp(self, x_int, scaling_factor): with torch.no_grad(): x0_int = torch.floor(self.x0 / scaling_factor) x_int = torch.max(x_int, self.const * x0_int) q = floor_ste.apply(x_int / x0_int) r = x_int - x0_int * q (exp_int, exp_scaling_factor) = self.int_polynomial(r, scaling_factor) exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0) scaling_factor = exp_scaling_factor / 2 ** self.const return (exp_int, scaling_factor) def forward(self, x, scaling_factor): if not self.quant_mode: return (nn.functional.softmax(x, dim=-1), None) x_int = x / scaling_factor (x_int_max, _) = x_int.max(dim=-1, keepdim=True) x_int = x_int - x_int_max (exp_int, exp_scaling_factor) = self.int_exp(x_int, scaling_factor) (exp, exp_scaling_factor) = self.act(exp_int, exp_scaling_factor) exp_int = exp / exp_scaling_factor exp_int_sum = exp_int.sum(dim=-1, keepdim=True) factor = floor_ste.apply(2 ** self.max_bit / exp_int_sum) exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit)) scaling_factor = 1 / 2 ** self.output_bit return (exp_int * scaling_factor, scaling_factor) class IntLayerNorm(nn.Module): def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant='none'): super().__init__() self.normalized_shape = normalized_shape self.eps = eps self.weight = nn.Parameter(torch.zeros(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.quant_mode = quant_mode if force_dequant in ['nonlinear', 'layernorm']: logger.info('Force dequantize layernorm') self.quant_mode = False self.register_buffer('shift', torch.zeros(1)) self.output_bit = output_bit self.max_bit = 32 self.dim_sqrt = None self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode) def set_shift(self, y_int): with torch.no_grad(): y_sq_int = y_int ** 2 var_int = torch.sum(y_sq_int, axis=2, keepdim=True) shift = torch.log2(torch.sqrt(var_int / 2 ** self.max_bit)).ceil().max() shift_old = self.shift self.shift = torch.max(self.shift, shift) logger.info(f'Dynamic shift adjustment: {int(shift_old)} -> {int(self.shift)}') def overflow_fallback(self, y_int): self.set_shift(y_int) y_int_shifted = floor_ste.apply(y_int / 2 ** self.shift) y_sq_int = y_int_shifted ** 2 var_int = torch.sum(y_sq_int, axis=2, keepdim=True) return var_int def forward(self, x, scaling_factor=None): if not self.quant_mode: mean = x.mean(axis=2, keepdim=True) y = x - mean var = torch.mean(y ** 2, axis=2, keepdim=True) x = y / torch.sqrt(self.eps + var) x = x * self.weight + self.bias return (x, None) if self.dim_sqrt is None: n = torch.tensor(x.shape[2], dtype=torch.float) self.dim_sqrt = torch.sqrt(n).to(x.device) x_int = x / scaling_factor mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True)) y_int = x_int - mean_int y_int_shifted = floor_ste.apply(y_int / 2 ** self.shift) y_sq_int = y_int_shifted ** 2 var_int = torch.sum(y_sq_int, axis=2, keepdim=True) if self.training: if var_int.max() >= 2 ** self.max_bit: var_int = self.overflow_fallback(y_int) assert var_int.max() < 2 ** self.max_bit + 0.1, 'Error detected in overflow handling: `var_int` exceeds `self.max_bit` (the maximum possible bit width)' std_int = floor_ste.apply(torch.sqrt(var_int)) * 2 ** self.shift factor = floor_ste.apply(2 ** 31 / std_int) 
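# --- Illustrative sketch, not part of the original module: the symmetric linear quantization
# scheme that QuantEmbedding, QuantAct and QuantLinear above are built on. For b bits the scale
# is max(|x|) / (2**(b-1) - 1); integer codes are round(x / scale) clamped to the signed range,
# and x is recovered up to rounding error as codes * scale. All names below are local to the sketch.
import torch

def symmetric_quantize(x: torch.Tensor, num_bits: int = 8):
    n = 2 ** (num_bits - 1) - 1                    # e.g. 127 for 8 bits
    scale = x.abs().max().clamp(min=1e-8) / n      # one scale shared by the whole tensor
    codes = torch.clamp(torch.round(x / scale), -n, n)
    return codes, scale

sample = torch.randn(4, 8)
codes, scale = symmetric_quantize(sample, num_bits=8)
print((sample - codes * scale).abs().max())        # quantization error, roughly bounded by scale / 2
# --- end sketch ---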
y_int = floor_ste.apply(y_int * factor / 2) scaling_factor = self.dim_sqrt / 2 ** 30 bias = self.bias.data.detach() / self.weight.data.detach() bias_int = floor_ste.apply(bias / scaling_factor) y_int = y_int + bias_int scaling_factor = scaling_factor * self.weight x = y_int * scaling_factor return (x, scaling_factor) def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False): input_length = input.shape[0] lower_index = round(input_length * (1 - lower_percentile * 0.01)) upper_index = round(input_length * upper_percentile * 0.01) upper_bound = torch.kthvalue(input, k=upper_index).values if lower_percentile == 0: lower_bound = upper_bound * 0 else: lower_bound = -torch.kthvalue(-input, k=lower_index).values if not output_tensor: lower_bound = lower_bound.item() upper_bound = upper_bound.item() return (lower_bound, upper_bound) def linear_quantize(input, scale, zero_point, inplace=False): if len(input.shape) == 4: scale = scale.view(-1, 1, 1, 1) zero_point = zero_point.view(-1, 1, 1, 1) elif len(input.shape) == 2: scale = scale.view(-1, 1) zero_point = zero_point.view(-1, 1) else: scale = scale.view(-1) zero_point = zero_point.view(-1) if inplace: input.mul_(1.0 / scale).add_(zero_point).round_() return input return torch.round(1.0 / scale * input + zero_point) def symmetric_linear_quantization_params(num_bits, saturation_min, saturation_max, per_channel=False): with torch.no_grad(): n = 2 ** (num_bits - 1) - 1 if per_channel: (scale, _) = torch.max(torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1) scale = torch.clamp(scale, min=1e-08) / n else: scale = max(saturation_min.abs(), saturation_max.abs()) scale = torch.clamp(scale, min=1e-08) / n return scale class SymmetricQuantFunction(Function): @staticmethod def forward(ctx, x, k, percentile_mode, scale): zero_point = torch.tensor(0.0).to(scale.device) n = 2 ** (k - 1) - 1 new_quant_x = linear_quantize(x, scale, zero_point, inplace=False) new_quant_x = torch.clamp(new_quant_x, -n, n - 1) ctx.scale = scale return new_quant_x @staticmethod def backward(ctx, grad_output): scale = ctx.scale if len(grad_output.shape) == 4: scale = scale.view(-1, 1, 1, 1) elif len(grad_output.shape) == 2: scale = scale.view(-1, 1) else: scale = scale.view(-1) return (grad_output.clone() / scale, None, None, None, None) class floor_ste(Function): @staticmethod def forward(ctx, x): return torch.floor(x) @staticmethod def backward(ctx, grad_output): return grad_output.clone() class round_ste(Function): @staticmethod def forward(ctx, x): return torch.round(x) @staticmethod def backward(ctx, grad_output): return grad_output.clone() def batch_frexp(inputs, max_bit=31): shape_of_input = inputs.size() inputs = inputs.view(-1) (output_m, output_e) = np.frexp(inputs.cpu().numpy()) tmp_m = [] for m in output_m: int_m_shifted = int(decimal.Decimal(m * 2 ** max_bit).quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP)) tmp_m.append(int_m_shifted) output_m = np.array(tmp_m) output_e = float(max_bit) - output_e return (torch.from_numpy(output_m).to(inputs.device).view(shape_of_input), torch.from_numpy(output_e).to(inputs.device).view(shape_of_input)) class FixedPointMul(Function): @staticmethod def forward(ctx, pre_act, pre_act_scaling_factor, bit_num, z_scaling_factor, identity=None, identity_scaling_factor=None): if len(pre_act_scaling_factor.shape) == 3: reshape = lambda x: x else: reshape = lambda x: x.view(1, 1, -1) ctx.identity = identity n = 2 ** (bit_num - 1) - 1 with torch.no_grad(): pre_act_scaling_factor = 
reshape(pre_act_scaling_factor) if identity is not None: identity_scaling_factor = reshape(identity_scaling_factor) ctx.z_scaling_factor = z_scaling_factor z_int = torch.round(pre_act / pre_act_scaling_factor) _A = pre_act_scaling_factor.type(torch.double) _B = z_scaling_factor.type(torch.float).type(torch.double) new_scale = _A / _B new_scale = reshape(new_scale) (m, e) = batch_frexp(new_scale) output = z_int.type(torch.double) * m.type(torch.double) output = torch.round(output / 2.0 ** e) if identity is not None: wx_int = torch.round(identity / identity_scaling_factor) _A = identity_scaling_factor.type(torch.double) _B = z_scaling_factor.type(torch.float).type(torch.double) new_scale = _A / _B new_scale = reshape(new_scale) (m1, e1) = batch_frexp(new_scale) output1 = wx_int.type(torch.double) * m1.type(torch.double) output1 = torch.round(output1 / 2.0 ** e1) output = output1 + output return torch.clamp(output.type(torch.float), -n - 1, n) @staticmethod def backward(ctx, grad_output): identity_grad = None if ctx.identity is not None: identity_grad = grad_output.clone() / ctx.z_scaling_factor return (grad_output.clone() / ctx.z_scaling_factor, None, None, None, None, identity_grad, None) # File: transformers-main/src/transformers/models/idefics/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available _import_structure = {'configuration_idefics': ['IdeficsConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_idefics'] = ['IdeficsImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_idefics'] = ['IdeficsForVisionText2Text', 'IdeficsModel', 'IdeficsPreTrainedModel'] _import_structure['processing_idefics'] = ['IdeficsProcessor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_tf_idefics'] = ['TFIdeficsForVisionText2Text', 'TFIdeficsModel', 'TFIdeficsPreTrainedModel'] if TYPE_CHECKING: from .configuration_idefics import IdeficsConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_idefics import IdeficsImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_idefics import IdeficsForVisionText2Text, IdeficsModel, IdeficsPreTrainedModel from .processing_idefics import IdeficsProcessor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_idefics import TFIdeficsForVisionText2Text, TFIdeficsModel, TFIdeficsPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) # File: transformers-main/src/transformers/models/idefics/configuration_idefics.py """""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class IdeficsVisionConfig(PretrainedConfig): model_type = 'idefics' attribute_map = {'hidden_size': 'embed_dim'} def __init__(self, embed_dim=768, image_size=224, intermediate_size=5120, patch_size=14, num_hidden_layers=32, num_attention_heads=16, num_channels=3, 
hidden_act='gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs): self.embed_dim = embed_dim self.image_size = image_size self.intermediate_size = intermediate_size self.patch_size = patch_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.hidden_act = hidden_act super().__init__(**kwargs) class IdeficsPerceiverConfig(PretrainedConfig): model_type = 'idefics' def __init__(self, use_resampler=False, resampler_n_latents=64, resampler_depth=6, resampler_n_heads=16, resampler_head_dim=96, qk_layer_norms_perceiver=False, **kwargs): self.use_resampler = use_resampler self.resampler_n_latents = resampler_n_latents self.resampler_depth = resampler_depth self.resampler_n_heads = resampler_n_heads self.resampler_head_dim = resampler_head_dim self.qk_layer_norms_perceiver = qk_layer_norms_perceiver super().__init__(**kwargs) class IdeficsConfig(PretrainedConfig): model_type = 'idefics' is_composition = False def __init__(self, vocab_size=32000, additional_vocab_size=0, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, dropout=0.0, hidden_act='silu', initializer_range=0.02, alpha_initializer='zeros', alphas_initializer_range=0.0, alpha_type='float', rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, cross_layer_interval=1, qk_layer_norms=False, freeze_text_layers=True, freeze_text_module_exceptions=[], freeze_lm_head=False, freeze_vision_layers=True, freeze_vision_module_exceptions=[], use_resampler=False, vision_config=None, perceiver_config=None, **kwargs): self.vocab_size = vocab_size self.additional_vocab_size = additional_vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.dropout = dropout self.hidden_act = hidden_act self.initializer_range = initializer_range self.alpha_initializer = alpha_initializer self.alphas_initializer_range = alphas_initializer_range self.alpha_type = alpha_type self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.cross_layer_interval = cross_layer_interval self.qk_layer_norms = qk_layer_norms self.freeze_vision_layers = freeze_vision_layers self.freeze_text_layers = freeze_text_layers self.freeze_text_module_exceptions = freeze_text_module_exceptions self.freeze_vision_module_exceptions = freeze_vision_module_exceptions self.freeze_lm_head = freeze_lm_head self.use_resampler = use_resampler if perceiver_config is None: self.perceiver_config = IdeficsPerceiverConfig() elif isinstance(perceiver_config, dict): self.perceiver_config = IdeficsPerceiverConfig(**perceiver_config) elif isinstance(perceiver_config, IdeficsPerceiverConfig): self.perceiver_config = perceiver_config if vision_config is None: self.vision_config = IdeficsVisionConfig() elif isinstance(vision_config, dict): self.vision_config = IdeficsVisionConfig(**vision_config) elif isinstance(vision_config, IdeficsVisionConfig): self.vision_config = vision_config super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) # File: 
transformers-main/src/transformers/models/idefics/image_processing_idefics.py """""" from typing import Callable, Dict, List, Optional, Union from PIL import Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, is_torch_available IDEFICS_STANDARD_MEAN = [0.48145466, 0.4578275, 0.40821073] IDEFICS_STANDARD_STD = [0.26862954, 0.26130258, 0.27577711] def convert_to_rgb(image): if image.mode == 'RGB': return image image_rgba = image.convert('RGBA') background = Image.new('RGBA', image_rgba.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, image_rgba) alpha_composite = alpha_composite.convert('RGB') return alpha_composite class IdeficsImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, image_size: int=224, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, image_num_channels: Optional[int]=3, **kwargs) -> None: super().__init__(**kwargs) self.image_size = image_size self.image_num_channels = image_num_channels self.image_mean = image_mean self.image_std = image_std def preprocess(self, images: ImageInput, image_num_channels: Optional[int]=3, image_size: Optional[Dict[str, int]]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, transform: Callable=None, return_tensors: Optional[Union[str, TensorType]]=TensorType.PYTORCH, **kwargs) -> TensorType: image_size = image_size if image_size is not None else self.image_size image_num_channels = image_num_channels if image_num_channels is not None else self.image_num_channels image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = (image_size, image_size) if isinstance(images, list) and len(images) == 0: return [] images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') if transform is not None: if not is_torch_available(): raise ImportError('To pass in `transform` torch must be installed') import torch images = [transform(x) for x in images] return torch.stack(images) images = [convert_to_rgb(x) for x in images] images = [to_numpy_array(x) for x in images] images = [resize(x, size, resample=PILImageResampling.BICUBIC) for x in images] images = [self.rescale(image=image, scale=1 / 255) for image in images] images = [self.normalize(x, mean=image_mean, std=image_std) for x in images] images = [to_channel_dimension_format(x, ChannelDimension.FIRST) for x in images] images = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)['pixel_values'] return images # File: transformers-main/src/transformers/models/idefics/modeling_idefics.py """""" from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ... 
import PreTrainedModel from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ModelOutput from ...modeling_utils import PretrainedConfig from ...pytorch_utils import ALL_LAYERNORM_LAYERS from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_idefics import IdeficsConfig from .perceiver import IdeficsPerceiverResampler from .vision import IdeficsVisionTransformer logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'IdeficsConfig' def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int): if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask @dataclass class IdeficsBaseModelOutputWithPast(ModelOutput): last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class IdeficsCausalLMOutputWithPast(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None def expand_inputs_for_generation(input_ids, expand_size=1, is_encoder_decoder=False, attention_mask=None, encoder_outputs=None, **model_kwargs): expanded_return_idx = torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) input_ids = input_ids.index_select(0, expanded_return_idx) model_kwargs['pixel_values'] = model_kwargs.get('pixel_values', None) model_kwargs['image_encoder_embeddings'] = model_kwargs.get('image_encoder_embeddings', None) model_kwargs['perceiver_embeddings'] = model_kwargs.get('perceiver_embeddings', None) model_kwargs['image_attention_mask'] = model_kwargs.get('image_attention_mask', None) if 'token_type_ids' in model_kwargs: token_type_ids = model_kwargs['token_type_ids'] model_kwargs['token_type_ids'] = token_type_ids.index_select(0, expanded_return_idx) if attention_mask is not None: model_kwargs['attention_mask'] = attention_mask.index_select(0, expanded_return_idx) if model_kwargs['image_attention_mask'] is not None: model_kwargs['image_attention_mask'] = model_kwargs['image_attention_mask'].index_select(0, expanded_return_idx) if model_kwargs['pixel_values'] is not None: 
model_kwargs['pixel_values'] = model_kwargs['pixel_values'].index_select(0, expanded_return_idx) elif model_kwargs['image_encoder_embeddings'] is not None: model_kwargs['image_encoder_embeddings'] = model_kwargs['image_encoder_embeddings'].index_select(0, expanded_return_idx) elif model_kwargs['perceiver_embeddings'] is not None: model_kwargs['perceiver_embeddings'] = model_kwargs['perceiver_embeddings'].index_select(0, expanded_return_idx) return (input_ids, model_kwargs) def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs): token_type_ids = kwargs.get('token_type_ids', None) cache_position = kwargs.get('cache_position', None) if past_key_values is not None: if input_ids.shape[1] != cache_position.shape[0]: input_ids = input_ids[:, cache_position] if token_type_ids is not None: token_type_ids = token_type_ids[:, -input_ids.shape[1]:] attention_mask = kwargs.get('attention_mask', None) position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) position_ids = position_ids.clone(memory_format=torch.contiguous_format) pixel_values = kwargs.get('pixel_values', None) image_encoder_embeddings = kwargs.get('image_encoder_embeddings', None) perceiver_embeddings = kwargs.get('perceiver_embeddings', None) image_attention_mask = kwargs.get('image_attention_mask', None) interpolate_pos_encoding = kwargs.get('interpolate_pos_encoding', False) return {'input_ids': input_ids, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'cache_position': cache_position, 'position_ids': position_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'pixel_values': pixel_values, 'image_encoder_embeddings': image_encoder_embeddings, 'perceiver_embeddings': perceiver_embeddings, 'image_attention_mask': image_attention_mask, 'interpolate_pos_encoding': interpolate_pos_encoding} def freeze_model(model, module_exceptions=[]): mapping = {'LayerNorm': nn.LayerNorm, 'Linear': nn.Linear, 'Embedding': nn.Embedding} module_exceptions_mapped = [mapping[m] for m in module_exceptions] for module in model.modules(): if module_exceptions and any((isinstance(module, t) for t in module_exceptions_mapped)): module.requires_grad_(True) else: module.requires_grad_(False) return model class IdeficsDecoupledEmbedding(nn.Embedding): def __init__(self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze: Optional[bool]=False, device=None, dtype=None, padding_idx=None, **kwargs) -> None: if padding_idx is not None and padding_idx > num_embeddings: raise ValueError(f'padding_idx must be within num_embeddings. 
Got {padding_idx} and {num_embeddings}') super().__init__(num_embeddings=num_embeddings, embedding_dim=embedding_dim, device=device, dtype=dtype, padding_idx=padding_idx, **kwargs) self.num_embeddings = num_embeddings self.padding_idx = padding_idx self.num_additional_embeddings = num_additional_embeddings self.partially_freeze = partially_freeze if partially_freeze: self.weight.requires_grad_(False) if self.num_additional_embeddings > 0: self.additional_embedding = nn.Embedding(num_embeddings=self.num_additional_embeddings, embedding_dim=embedding_dim, device=device, dtype=dtype) def forward(self, input_ids): if self.num_additional_embeddings == 0: return F.embedding(input_ids, self.weight) input_ids = input_ids.clone() additional_vocab_indices = torch.where(input_ids >= self.num_embeddings) input_ids_additional_vocab = input_ids[additional_vocab_indices] additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings) input_ids[additional_vocab_indices] = 0 full_vector = F.embedding(input_ids, self.weight) full_vector[additional_vocab_indices] = additional_embeddings return full_vector def extra_repr(self) -> str: return 'num_embeddings={}, num_additional_embeddings={}, embedding_dim={}, partially_freeze={}'.format(self.num_embeddings, self.num_additional_embeddings, self.embedding_dim, self.partially_freeze) class IdeficsDecoupledLinear(nn.Linear): def __init__(self, in_features: int, out_features: int, out_additional_features: int=0, bias: bool=True, partially_freeze: bool=True, device=None, dtype=None) -> None: super().__init__(in_features, out_features, bias, device, dtype) self.out_additional_features = out_additional_features self.partially_freeze = partially_freeze self.in_features = in_features self.out_features = out_features if partially_freeze: self.weight.requires_grad_(False) if bias: self.bias.requires_grad_(False) if out_additional_features > 0: self.additional_fc = nn.Linear(in_features=in_features, out_features=out_additional_features, bias=bias, device=device, dtype=dtype) def forward(self, input: torch.Tensor) -> torch.Tensor: output = F.linear(input, self.weight, self.bias) if self.out_additional_features > 0: additional_features = self.additional_fc(input) output = torch.cat((output, additional_features), -1) return output def extra_repr(self) -> str: return 'in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}'.format(self.in_features, self.out_features, self.out_additional_features, self.bias is not None, self.partially_freeze) class IdeficsRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states def extra_repr(self): return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}' ALL_LAYERNORM_LAYERS.append(IdeficsRMSNorm) class IdeficsEmbedding(torch.nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim) 
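# --- Illustrative sketch, not part of the original module: the rotary position embedding (RoPE)
# arithmetic that IdeficsEmbedding, rotate_half and apply_rotary_pos_emb implement below.
# Frequencies follow inv_freq = base ** (-2i / dim); each position rotates pairs of channels by
# position * inv_freq. All names below are local to the sketch.
import torch

def rope_cos_sin(seq_len: int, dim: int, base: float = 10000.0):
    inv_freq = 1.0 / base ** (torch.arange(0, dim, 2).float() / dim)  # (dim / 2,)
    positions = torch.arange(seq_len).float()
    freqs = torch.einsum("i,j->ij", positions, inv_freq)              # (seq_len, dim / 2)
    emb = torch.cat((freqs, freqs), dim=-1)                           # (seq_len, dim)
    return emb.cos(), emb.sin()

def rotate_half_local(x: torch.Tensor) -> torch.Tensor:
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

cos, sin = rope_cos_sin(seq_len=6, dim=8)
queries = torch.randn(6, 8)                                           # (positions, head_dim)
rotated_queries = queries * cos + rotate_half_local(queries) * sin
print(rotated_queries.shape)                                          # torch.Size([6, 8])
# --- end sketch ---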
self.register_buffer('inv_freq', inv_freq, persistent=False) self._set_cos_sin_cache(seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) freqs = torch.einsum('i,j->ij', t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False) self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return (self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype)) def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): cos = cos[position_ids].unsqueeze(unsqueeze_dim) sin = sin[position_ids].unsqueeze(unsqueeze_dim) q_embed = q * cos + rotate_half(q) * sin k_embed = k * cos + rotate_half(k) * sin return (q_embed, k_embed) class IdeficsMLP(nn.Module): def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str): super().__init__() self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.act_fn = ACT2FN[hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) class IdeficsAttention(nn.Module): def __init__(self, hidden_size: int, num_heads: int, dropout: float=0.0, is_cross_attention: bool=False, config: PretrainedConfig=None, qk_layer_norms: bool=False, layer_idx: int=None): super().__init__() self.hidden_size = hidden_size self.num_heads = num_heads self.head_dim = hidden_size // num_heads self.dropout = dropout self.is_causal = True self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. 
Please make sure to provide a `layer_idx` when creating this class.') if self.head_dim * num_heads != self.hidden_size: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {num_heads}).') self.is_cross_attention = is_cross_attention if not hasattr(nn.functional, 'scaled_dot_product_attention'): raise ValueError('this model requires pytorch 2.0 or higher') if self.is_cross_attention: kv_input_dim = self.hidden_size if not hasattr(config.vision_config, 'embed_dim') else config.vision_config.embed_dim self.q_proj = nn.Linear(self.hidden_size, num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(kv_input_dim, num_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(kv_input_dim, num_heads * self.head_dim, bias=False) else: self.q_proj = nn.Linear(self.hidden_size, num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, num_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, num_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(num_heads * self.head_dim, hidden_size, bias=False) self.rotary_emb = IdeficsEmbedding(self.head_dim) self.qk_layer_norms = qk_layer_norms if self.qk_layer_norms: self.q_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps) self.k_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = self.is_cross_attention or key_value_states is not None (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) if not is_cross_attention: key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) else: (_, kv_len, _) = key_value_states.size() key_states = self.k_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = self.v_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += cache_position[0] if not is_cross_attention: (cos, sin) = self.rotary_emb(value_states, seq_len=max(kv_seq_len, q_len)) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) if past_key_value is not None: cache_kwargs = {'cache_position': cache_position} (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) if self.qk_layer_norms: query_states = self.q_layer_norm(query_states) key_states = self.k_layer_norm(key_states) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}') if query_states.device.type == 'cuda' and attention_mask 
is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() is_causal = True if self.is_causal and attention_mask is None and (q_len > 1) else False attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=self.dropout if self.training else 0.0, is_causal=is_causal) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) attn_weights = None if output_attentions: logger.warning_once('attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead') return (attn_output, attn_weights, past_key_value) class IdeficsDecoderLayer(nn.Module): def __init__(self, config: IdeficsConfig, layer_idx: int=None): super().__init__() self.hidden_size = config.hidden_size self.self_attn = IdeficsAttention(hidden_size=self.hidden_size, num_heads=config.num_attention_heads, dropout=config.dropout, config=config, layer_idx=layer_idx) self.mlp = IdeficsMLP(hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act) self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.dropout = config.dropout def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class IdeficsGatedCrossAttentionLayer(nn.Module): def __init__(self, config: IdeficsConfig): super().__init__() self.hidden_size = config.hidden_size self.cross_attn = IdeficsAttention(hidden_size=self.hidden_size, num_heads=config.num_attention_heads, is_cross_attention=True, dropout=config.dropout, config=config, qk_layer_norms=config.qk_layer_norms) self.mlp = IdeficsMLP(hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act) self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, 
eps=config.rms_norm_eps) self.config = config.dropout self.act_cross_attn = nn.Tanh() self.act_dense = nn.Tanh() if config.alpha_initializer == 'zeros': if config.alpha_type == 'vector': self.alpha_cross_attn = nn.Parameter(torch.zeros(1, 1, self.hidden_size)) self.alpha_dense = nn.Parameter(torch.zeros(1, 1, self.hidden_size)) elif config.alpha_type == 'float': self.alpha_cross_attn = nn.Parameter(torch.zeros(1)) self.alpha_dense = nn.Parameter(torch.zeros(1)) else: raise ValueError(f'Unknown value for `alpha_type` ({config.alpha_type})') elif config.alpha_initializer == 'ones': if config.alpha_type == 'vector': self.alpha_cross_attn = nn.Parameter(torch.ones(1, 1, self.hidden_size)) self.alpha_dense = nn.Parameter(torch.ones(1, 1, self.hidden_size)) elif config.alpha_type == 'float': self.alpha_cross_attn = nn.Parameter(torch.ones(1)) self.alpha_dense = nn.Parameter(torch.ones(1)) else: raise ValueError(f'Unknown value for `alpha_type` ({config.alpha_type})') elif config.alpha_initializer in {'normal', 'gaussian', 'random'}: if config.alpha_type == 'vector': self.alpha_cross_attn = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size))) self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size))) elif config.alpha_type == 'float': self.alpha_cross_attn = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,))) self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1,))) else: raise ValueError(f'Unknown value for `alpha_type` ({config.alpha_type})') else: raise NotImplementedError(f'Alpha initialization scheme {config.alpha_initializer} not yet implemented!') if not (hasattr(self, 'alpha_cross_attn') and hasattr(self, 'alpha_dense')): raise ValueError('Alpha parameters not initialized correctly!') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, image_hidden_states: Optional[torch.Tensor]=None, image_attention_mask: Optional[torch.Tensor]=None, cross_attention_gate: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, past_key_value: Optional[Tuple[torch.Tensor]]=None) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: if image_hidden_states is None: raise ValueError('`image_hidden_states` is required for Idefics cross attention module which are visual features to be conditioned on.') if cross_attention_gate is None: raise ValueError('`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images.') if past_key_value is not None: raise NotImplementedError('Past key value states are not implemented for Idefics cross attention module.') residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.cross_attn(hidden_states=hidden_states, key_value_states=image_hidden_states, attention_mask=image_attention_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training) hidden_states[cross_attention_gate == 0] = hidden_states[cross_attention_gate == 0].fill_(0) hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states)
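# --- Editor's note: the sketch below is an illustration added for clarity and is not part of the
# original transformers source. Under the same tanh-gating scheme used by this
# IdeficsGatedCrossAttentionLayer, it shows why initializing `alpha_cross_attn` to zeros makes a
# freshly added cross-attention block start out as an identity mapping: tanh(0) == 0, so the residual
# passes through unchanged until alpha is learned. The helper name `_gated_residual_demo` is hypothetical.
def _gated_residual_demo():
    import torch
    torch.manual_seed(0)
    residual = torch.randn(2, 4, 8)            # (batch, seq, hidden)
    cross_attn_out = torch.randn(2, 4, 8)      # stand-in for the cross-attention sublayer output
    alpha = torch.zeros(1)                      # 'float' alpha at its zeros initialization, as above
    gated = residual + torch.tanh(alpha) * cross_attn_out
    assert torch.allclose(gated, residual)      # zero-initialized gate => block is a no-op at start
    alpha_trained = torch.tensor([0.5])         # once alpha moves away from 0 ...
    gated = residual + torch.tanh(alpha_trained) * cross_attn_out
    return gated                                # ... the visual signal is blended into the text stream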
hidden_states = nn.functional.dropout(hidden_states, p=self.config, training=self.training) hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs LLAMA_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`IdeficsConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('The bare LLaMA Model outputting raw hidden-states without any specific head on top.', LLAMA_START_DOCSTRING) class IdeficsPreTrainedModel(PreTrainedModel): config_class = IdeficsConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['IdeficsDecoderLayer', 'IdeficsGatedCrossAttentionLayer'] _supports_sdpa = True _supports_cache_class = True _supports_static_cache = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @classmethod def _check_and_enable_sdpa(cls, config, hard_check_only: bool=False) -> PretrainedConfig: _is_bettertransformer = getattr(cls, 'use_bettertransformer', False) if _is_bettertransformer: return config if not hard_check_only: config._attn_implementation = 'sdpa' return config LLAMA_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. 
See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. 
It is used to update the cache in the correct position and to infer\n the complete sequence length.\n" @add_start_docstrings('The bare LLaMA Model outputting raw hidden-states without any specific head on top.', LLAMA_START_DOCSTRING) class IdeficsModel(IdeficsPreTrainedModel): def __init__(self, config: IdeficsConfig): super().__init__(config) self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = IdeficsDecoupledEmbedding(num_embeddings=config.vocab_size, num_additional_embeddings=config.additional_vocab_size, embedding_dim=config.hidden_size, partially_freeze=config.freeze_text_layers, padding_idx=self.padding_idx) self.image_size = config.vision_config.image_size self.vision_config = config.vision_config self.vision_model = IdeficsVisionTransformer(config.vision_config) if config.use_resampler: perceiver_config = config.perceiver_config self.perceiver_resampler = IdeficsPerceiverResampler(config, config.vision_config.embed_dim, perceiver_config.resampler_depth, perceiver_config.resampler_n_heads, perceiver_config.resampler_head_dim, perceiver_config.resampler_n_latents) self.layers = nn.ModuleList([IdeficsDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.cross_layer_interval = config.cross_layer_interval num_cross_layers = config.num_hidden_layers // self.cross_layer_interval self.gated_cross_attn_layers = nn.ModuleList([IdeficsGatedCrossAttentionLayer(config) for _ in range(num_cross_layers)]) self.gradient_checkpointing = False self.norm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_init() self.freeze_relevant_params(config) def freeze_relevant_params(self, config=None): if config is None: config = self.config if config.freeze_text_layers: self.freeze_text_layers(config.freeze_text_module_exceptions) if config.freeze_vision_layers: freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions) def freeze_text_layers(self, module_exceptions=[]): for module in [self.layers, self.norm]: freeze_model(module, module_exceptions=module_exceptions) def freeze_vision_layers(self, module_exceptions=[]): freeze_model(self.vision_model, module_exceptions=module_exceptions) def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_encoder_embeddings: Optional[torch.FloatTensor]=None, perceiver_embeddings: Optional[torch.FloatTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, IdeficsBaseModelOutputWithPast]: device = input_ids.device if input_ids is not None else inputs_embeds.device output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else 
self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one') if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.') use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) return_legacy_cache = False if use_cache and (not isinstance(past_key_values, Cache)): if not self.training: logger.warning_once('We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.45. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/internal/generation_utils#transformers.Cache)') return_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) (batch_size, seq_length, _) = inputs_embeds.shape past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 seq_length_with_past = seq_length + past_key_values_length if cache_position is None: cache_position = torch.arange(past_key_values_length, past_key_values_length + inputs_embeds.shape[1], device=inputs_embeds.device) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) elif position_ids is None: position_ids = torch.arange(past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) if (pixel_values, image_encoder_embeddings, perceiver_embeddings).count(None) != 2: raise ValueError('Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None.') elif pixel_values is not None: pixel_values = pixel_values.to(dtype=self.dtype, device=device) (batch_size, num_images) = pixel_values.shape[:2] pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:]) image_hidden_states = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state elif image_encoder_embeddings is not None: (batch_size, num_images, image_seq_len, image_hidden_size) = image_encoder_embeddings.size() image_hidden_states = image_encoder_embeddings.to(dtype=self.dtype, device=device) image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size) if self.config.use_resampler: if perceiver_embeddings is None: perceiver_embeddings = self.perceiver_resampler(image_hidden_states) (image_seq_len, image_hidden_size) = (perceiver_embeddings.size(1), perceiver_embeddings.size(2)) else: (batch_size, num_images, image_seq_len, image_hidden_size) = perceiver_embeddings.size() image_hidden_states = perceiver_embeddings elif perceiver_embeddings is None: (image_seq_len, image_hidden_size) = (image_hidden_states.size(1), image_hidden_states.size(2)) else: raise ValueError('If `perceiver_embeddings` are passed, use_resampler should be True') image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size) text_seq_len = image_attention_mask.size(1) image_attention_mask = image_attention_mask.unsqueeze(-1) image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len) image_attention_mask = 
image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len) if image_hidden_states is not None: (image_batch_size, image_sequence_length, _) = image_hidden_states.size() image_hidden_shape = (image_batch_size, image_sequence_length) if image_attention_mask is None: image_attention_mask = torch.ones(image_hidden_shape, device=device) image_attention_mask = self.invert_attention_mask(image_attention_mask) else: image_attention_mask = None cross_attention_gate = (image_attention_mask == 0.0).any(dim=-1).to(dtype=self.dtype).squeeze(dim=1).to(device) if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device) attention_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) hidden_states = inputs_embeds all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) def vblock(main_block, hidden_states, attention_mask, position_ids, past_key_value, image_hidden_states, image_attention_mask, cross_attention_gate, output_attentions, use_cache, layer_idx, cross_layer_interval, gated_cross_attn_layers, cache_position): if layer_idx % cross_layer_interval == 0: xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval] outputs = xblock(hidden_states, attention_mask=attention_mask, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, cross_attention_gate=cross_attention_gate, output_attentions=output_attentions, use_cache=use_cache, past_key_value=None) hidden_states = outputs[0] layer_outputs = main_block(hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position) return layer_outputs if self.gradient_checkpointing and self.training: past_key_values = None if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False layer_outputs = self._gradient_checkpointing_func(vblock, decoder_layer, hidden_states, attention_mask, position_ids, past_key_values, image_hidden_states, image_attention_mask, cross_attention_gate, output_attentions, use_cache, idx, self.cross_layer_interval, self.gated_cross_attn_layers, cache_position) else: layer_outputs = vblock(decoder_layer, hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_values, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, cross_attention_gate=cross_attention_gate, output_attentions=output_attentions, use_cache=use_cache, layer_idx=idx, cross_layer_interval=self.cross_layer_interval, gated_cross_attn_layers=self.gated_cross_attn_layers, cache_position=cache_position) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if return_legacy_cache: next_cache = next_cache.to_legacy_cache() image_hidden_states = image_hidden_states.view(batch_size, num_images, image_seq_len, image_hidden_size) if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states] if v is not None)) return IdeficsBaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, image_hidden_states=image_hidden_states) def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_static_cache = isinstance(past_key_values, StaticCache) if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None (dtype, device) = (input_tensor.dtype, input_tensor.device) min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: target_length = past_key_values.get_max_length() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, min_dtype=min_dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type == 'cuda') and (not output_attentions): causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask class IdeficsForVisionText2Text(IdeficsPreTrainedModel): _keys_to_ignore_on_load_missing = ['lm_head.weight'] _tied_weights_keys = ['model.embed_tokens.weight', 'lm_head.weight'] def __init__(self, config, vision_model=None): super().__init__(config) self.model = 
IdeficsModel(config) self.lm_head = IdeficsDecoupledLinear(in_features=config.hidden_size, out_features=config.vocab_size, out_additional_features=config.additional_vocab_size, bias=False, partially_freeze=config.freeze_lm_head) self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model def tie_weights(self): output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() if getattr(self.config, 'tie_word_embeddings', True): output_embeddings.weight = input_embeddings.weight if input_embeddings.num_additional_embeddings > 0: assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight if hasattr(output_embeddings, 'out_features') and hasattr(input_embeddings, 'num_embeddings'): output_embeddings.out_features = input_embeddings.num_embeddings if hasattr(output_embeddings, 'out_additional_features') and hasattr(input_embeddings, 'num_additional_embeddings'): output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=IdeficsCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_encoder_embeddings: Optional[torch.FloatTensor]=None, perceiver_embeddings: Optional[torch.FloatTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, IdeficsCausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, image_encoder_embeddings=image_encoder_embeddings, perceiver_embeddings=perceiver_embeddings, image_attention_mask=image_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, cache_position=cache_position) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) if attention_mask is not None: shift_attention_mask = attention_mask[..., 1:].to(logits.device) shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous() shift_labels = 
labels[..., 1:][shift_attention_mask != 0].contiguous() else: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return IdeficsCausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states) def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): image_hidden_states = kwargs.pop('image_hidden_states', None) if image_hidden_states is not None: if self.config.use_resampler: kwargs['perceiver_embeddings'] = image_hidden_states else: kwargs['image_encoder_embeddings'] = image_hidden_states kwargs['pixel_values'] = None inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs) unwanted_kwargs = ['token_type_ids'] for kwarg in unwanted_kwargs: inputs.pop(kwarg, None) return inputs @staticmethod def _expand_inputs_for_generation(*args, **model_kwargs): return expand_inputs_for_generation(*args, **model_kwargs) def _update_model_kwargs_for_generation(self, outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool=False, **kwargs) -> Dict[str, Any]: model_kwargs = super()._update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder, **kwargs) if 'image_attention_mask' in model_kwargs: image_attention_mask = model_kwargs['image_attention_mask'] last_mask = image_attention_mask[:, -1, :].unsqueeze(1) model_kwargs['image_attention_mask'] = last_mask model_kwargs['image_hidden_states'] = outputs.image_hidden_states return model_kwargs @staticmethod def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple((past_state.index_select(0, beam_idx) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/idefics/modeling_tf_idefics.py """""" from __future__ import annotations from dataclasses import dataclass from typing import List, Optional, Tuple, Union import tensorflow as tf from ... 
import TFPreTrainedModel from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ModelOutput from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFModelInputType, keras_serializable, shape_list, unpack_inputs from ...tf_utils import invert_attention_mask, scaled_dot_product_attention from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_idefics import IdeficsConfig from .perceiver_tf import TFIdeficsPerceiverResampler from .vision_tf import TFIdeficsVisionTransformer logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'IdeficsConfig' @dataclass class TFIdeficsBaseModelOutputWithPast(ModelOutput): last_hidden_state: tf.Tensor = None past_key_values: Optional[Tuple[Tuple[tf.Tensor]]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None image_hidden_states: Optional[Tuple[tf.Tensor]] = None @dataclass class TFIdeficsCausalLMOutputWithPast(ModelOutput): loss: Optional[tf.Tensor] = None logits: tf.Tensor = None past_key_values: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None image_hidden_states: Optional[Tuple[tf.Tensor]] = None def expand_inputs_for_generation(input_ids, expand_size=1, is_encoder_decoder=False, attention_mask=None, encoder_outputs=None, **model_kwargs): expanded_return_idx = tf.reshape(tf.repeat(tf.range(tf.shape(input_ids)[0]), expand_size), [-1]) input_ids = tf.gather(input_ids, expanded_return_idx) model_kwargs['pixel_values'] = model_kwargs.get('pixel_values', None) model_kwargs['image_encoder_embeddings'] = model_kwargs.get('image_encoder_embeddings', None) model_kwargs['perceiver_embeddings'] = model_kwargs.get('perceiver_embeddings', None) model_kwargs['image_attention_mask'] = model_kwargs.get('image_attention_mask', None) if 'token_type_ids' in model_kwargs: token_type_ids = model_kwargs['token_type_ids'] model_kwargs['token_type_ids'] = tf.gather(token_type_ids, expanded_return_idx) if attention_mask is not None: model_kwargs['attention_mask'] = tf.gather(attention_mask, expanded_return_idx) if model_kwargs['image_attention_mask'] is not None: model_kwargs['image_attention_mask'] = tf.gather(model_kwargs['image_attention_mask'], expanded_return_idx) if model_kwargs['pixel_values'] is not None: model_kwargs['pixel_values'] = tf.gather(model_kwargs['pixel_values'], expanded_return_idx) elif model_kwargs['image_encoder_embeddings'] is not None: model_kwargs['image_encoder_embeddings'] = tf.gather(model_kwargs['image_encoder_embeddings'], expanded_return_idx) elif model_kwargs['perceiver_embeddings'] is not None: model_kwargs['perceiver_embeddings'] = tf.gather(model_kwargs['perceiver_embeddings'], expanded_return_idx) return (input_ids, model_kwargs) def update_model_kwargs_for_generation(outputs, model_kwargs): if 'past_key_values' in outputs: model_kwargs['past_key_values'] = outputs.past_key_values else: model_kwargs['past_key_values'] = None if 'token_type_ids' in model_kwargs: token_type_ids = model_kwargs['token_type_ids'] model_kwargs['token_type_ids'] = tf.concat([token_type_ids, token_type_ids[:, -1:, ...]], axis=-1) if 'attention_mask' in model_kwargs: attention_mask = model_kwargs['attention_mask'] model_kwargs['attention_mask'] = tf.concat([attention_mask, tf.ones_like(attention_mask[:, -1:, ...])], axis=-1) if 'image_attention_mask' in model_kwargs: image_attention_mask = model_kwargs['image_attention_mask'] 
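# --- Editor's note: illustrative sketch only (added by the editor, not part of the original file).
# It mimics the kwarg bookkeeping done by the surrounding `update_model_kwargs_for_generation`: at each
# decoding step the text attention mask grows by one column of ones, while only the last row of the
# per-token image attention mask is carried over for the newly generated token. The helper name
# `_generation_mask_update_demo` is hypothetical.
def _generation_mask_update_demo():
    import tensorflow as tf
    attention_mask = tf.ones((2, 5), dtype=tf.int32)            # (batch, seq_len) after the prompt
    image_attention_mask = tf.ones((2, 5, 3), dtype=tf.int32)   # (batch, seq_len, num_images)
    # one decoding step: append a column of ones for the token that was just generated
    attention_mask = tf.concat([attention_mask, tf.ones_like(attention_mask[:, -1:])], axis=-1)
    # keep only the image-attention row of the last text position for the next step
    image_attention_mask = image_attention_mask[:, -1:, ...]
    return attention_mask.shape, image_attention_mask.shape     # (2, 6) and (2, 1, 3)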
last_mask = image_attention_mask[:, -1:, ...] model_kwargs['image_attention_mask'] = last_mask model_kwargs['image_hidden_states'] = outputs.image_hidden_states return model_kwargs def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs): token_type_ids = kwargs.get('token_type_ids', None) if past_key_values is not None: input_ids = input_ids[:, -1:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -1:] attention_mask = kwargs.get('attention_mask', None) position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = tf.math.cumsum(tf.cast(attention_mask, dtype=tf.int64), axis=-1) - 1 position_ids = tf.where(attention_mask == 0, 1, position_ids) if past_key_values is not None: position_ids = position_ids[:, -1:] pixel_values = kwargs.get('pixel_values', None) image_encoder_embeddings = kwargs.get('image_encoder_embeddings', None) perceiver_embeddings = kwargs.get('perceiver_embeddings', None) image_attention_mask = kwargs.get('image_attention_mask', None) interpolate_pos_encoding = kwargs.get('interpolate_pos_encoding', False) return {'input_ids': input_ids, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'position_ids': position_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'pixel_values': pixel_values, 'image_encoder_embeddings': image_encoder_embeddings, 'perceiver_embeddings': perceiver_embeddings, 'image_attention_mask': image_attention_mask, 'interpolate_pos_encoding': interpolate_pos_encoding} def freeze_model(model, module_exceptions=[]): mapping = {'LayerNorm': tf.keras.layers.LayerNormalization, 'Dense': tf.keras.layers.Dense, 'Embedding': tf.keras.layers.Embedding} module_exceptions_mapped = [mapping[m] for m in module_exceptions] if not hasattr(model, 'layers'): model.trainable = False return model for layer in model.layers: if module_exceptions and any((isinstance(layer, t) for t in module_exceptions_mapped)): layer.trainable = True else: layer.trainable = False return model class TFIdeficsDecoupledEmbedding(tf.keras.layers.Embedding): def __init__(self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze: Optional[bool]=False, dtype=None, **kwargs) -> None: super().__init__(input_dim=num_embeddings, output_dim=embedding_dim, dtype=dtype, **kwargs) self.num_embeddings = num_embeddings self.num_additional_embeddings = num_additional_embeddings self.partially_freeze = partially_freeze if partially_freeze: self.trainable = False if self.num_additional_embeddings > 0: self.additional_embedding = tf.keras.layers.Embedding(input_dim=self.num_additional_embeddings, output_dim=embedding_dim, dtype=dtype, name='additional_embedding') def call(self, input_ids): if self.num_additional_embeddings == 0: return super().call(input_ids) input_ids = tf.identity(input_ids) additional_vocab_indices = tf.where(input_ids >= self.num_embeddings) input_ids_additional_vocab = tf.gather_nd(input_ids, additional_vocab_indices) additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings) input_ids = tf.tensor_scatter_nd_update(input_ids, additional_vocab_indices, tf.zeros(tf.shape(additional_vocab_indices)[0], dtype=input_ids.dtype)) full_vector = super().call(input_ids) full_vector = tf.tensor_scatter_nd_update(full_vector, additional_vocab_indices, additional_embeddings) return full_vector def extra_repr(self) -> str: return 'num_embeddings={}, num_additional_embeddings={}, embedding_dim={}, 
partially_freeze={}'.format(self.num_embeddings, self.num_additional_embeddings, self.output_dim, self.partially_freeze) class TFIdeficsDecoupledLinear(tf.keras.layers.Layer): def __init__(self, in_features: int, out_features: int, out_additional_features: int=0, bias: bool=True, partially_freeze: bool=True, **kwargs) -> None: super().__init__(**kwargs) self.out_additional_features = out_additional_features self.partially_freeze = partially_freeze self.in_features = in_features self.out_features = out_features self.use_bias = bias if out_additional_features > 0: self.additional_fc = tf.keras.layers.Dense(units=out_additional_features, use_bias=bias, name='additional_fc') def call(self, inputs: tf.Tensor) -> tf.Tensor: output = tf.linalg.matmul(a=inputs, b=self.weight, transpose_b=True) if self.bias is not None: output = tf.nn.bias_add(output, self.bias) if self.out_additional_features > 0: additional_features = self.additional_fc(inputs) output = tf.concat([output, additional_features], axis=-1) return output def get_config(self): config = super().get_config() config.update({'in_features': self.in_features, 'out_features': self.out_features, 'out_additional_features': self.out_additional_features, 'bias': self.bias is not None, 'partially_freeze': self.partially_freeze}) return config def extra_repr(self) -> str: return 'in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}'.format(self.in_features, self.out_features, self.out_additional_features, self.bias is not None, self.partially_freeze) @classmethod def from_config(cls, config): return cls(**config) def build(self, input_shape=None): if self.built: return self.built = True self.weight = self.add_weight(shape=(self.out_features, self.in_features), trainable=not self.partially_freeze, name='weight') if self.use_bias: self.bias = self.add_weight(shape=(self.out_features,), trainable=not self.partially_freeze, name='bias') else: self.bias = None if getattr(self, 'additional_fc', None) is not None: with tf.name_scope(self.additional_fc.name): self.additional_fc.build(self.in_features) def _make_causal_mask(input_ids_shape, dtype, past_key_values_length=0): (bsz, tgt_len) = input_ids_shape mask = tf.fill((tgt_len, tgt_len), tf.dtypes.as_dtype(dtype).min) mask_cond = tf.range(tgt_len) mask = tf.where(mask_cond[:, None] >= mask_cond[None, :], 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length), dtype=dtype), mask], axis=-1) if bsz is None: mask = tf.expand_dims(mask, 0) mask = tf.expand_dims(mask, 0) mask = tf.tile(mask, [bsz, 1, 1, 1]) else: mask = tf.broadcast_to(mask[None, None, :, :], (bsz, 1, tgt_len, tgt_len + past_key_values_length)) return mask def _expand_mask(mask, dtype, tgt_len=None): (bsz, src_len) = shape_list(mask) tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = tf.expand_dims(tf.expand_dims(mask, 1), 1) expanded_mask = tf.broadcast_to(expanded_mask, [bsz, 1, tgt_len, src_len]) inverted_mask = 1.0 - tf.cast(expanded_mask, dtype) return tf.where(tf.cast(inverted_mask, bool), tf.fill(dims=shape_list(inverted_mask), value=tf.float32.min), inverted_mask) class TFIdeficsRMSNorm(tf.keras.layers.Layer): def __init__(self, hidden_size, eps=1e-06, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.variance_epsilon = eps def build(self, input_shape): if self.built: return self.built = True self.weight = self.add_weight(name='weight', shape=[self.hidden_size], initializer='ones') super().build(input_shape) 
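# --- Editor's note: small illustrative sketch (added by the editor, not part of the original file).
# It spells out the RMSNorm computation that the `call` method below implements: scale each hidden
# vector by the reciprocal root-mean-square of its elements, then multiply by a learned per-channel
# weight. With the weight at its 'ones' initialization the output has unit RMS. The helper name
# `_rms_norm_demo` is hypothetical.
def _rms_norm_demo(eps=1e-06):
    import tensorflow as tf
    x = tf.random.normal((2, 4, 8))                                  # (batch, seq, hidden)
    variance = tf.reduce_mean(tf.square(x), axis=-1, keepdims=True)  # mean of squares (uncentered)
    normed = x * tf.math.rsqrt(variance + eps)                       # x / RMS(x)
    rms = tf.sqrt(tf.reduce_mean(tf.square(normed), axis=-1))        # ~1.0 everywhere (up to eps)
    tf.debugging.assert_near(rms, tf.ones_like(rms), atol=1e-3)
    return normed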
def call(self, hidden_states): variance = tf.math.reduce_mean(tf.math.square(tf.cast(hidden_states, tf.float32)), axis=-1, keepdims=True) hidden_states = hidden_states * tf.math.rsqrt(variance + self.variance_epsilon) if self.weight.dtype in [tf.float16, tf.bfloat16]: hidden_states = tf.cast(hidden_states, self.weight.dtype) return self.weight * hidden_states class TFIdeficsEmbedding(tf.keras.layers.Layer): def __init__(self, dim, max_position_embeddings=2048, base=10000, **kwargs): super().__init__(**kwargs) self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base self.inv_freq = tf.constant(1.0 / self.base ** (tf.range(start=0, limit=self.dim, delta=2, dtype=tf.float32) / self.dim)) def _compute_cos_sin(self, seq_len): t = tf.range(seq_len, dtype=self.inv_freq.dtype) freqs = tf.einsum('i, j -> ij', t, self.inv_freq) emb = tf.concat((freqs, freqs), axis=-1) return (tf.cos(emb), tf.sin(emb)) def call(self, x, seq_len=None): if seq_len is None: seq_len = shape_list(x)[2] return self._compute_cos_sin(seq_len=seq_len) def rotate_half(x): x1 = x[..., :x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return tf.concat((-x2, x1), axis=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids): cos = tf.gather(cos, position_ids) sin = tf.gather(sin, position_ids) cos = tf.expand_dims(cos, 1) sin = tf.expand_dims(sin, 1) q_embed = q * cos + rotate_half(q) * sin k_embed = k * cos + rotate_half(k) * sin return (q_embed, k_embed) class TFIdeficsMLP(tf.keras.layers.Layer): def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str, **kwargs): super().__init__(**kwargs) self.gate_proj = tf.keras.layers.Dense(intermediate_size, use_bias=False, name='gate_proj') self.down_proj = tf.keras.layers.Dense(hidden_size, use_bias=False, name='down_proj') self.up_proj = tf.keras.layers.Dense(intermediate_size, use_bias=False, name='up_proj') self.act_fn = get_tf_activation(hidden_act) self.intermediate_size = intermediate_size self.hidden_size = hidden_size def call(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'gate_proj', None) is not None: with tf.name_scope(self.gate_proj.name): self.gate_proj.build(self.hidden_size) if getattr(self, 'down_proj', None) is not None: with tf.name_scope(self.down_proj.name): self.down_proj.build(self.intermediate_size) if getattr(self, 'up_proj', None) is not None: with tf.name_scope(self.up_proj.name): self.up_proj.build(self.hidden_size) class TFIdeficsAttention(tf.keras.layers.Layer): def __init__(self, hidden_size: int, num_heads: int, dropout: float=0.0, is_cross_attention: bool=False, config: IdeficsConfig=None, qk_layer_norms: bool=False, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_heads = num_heads self.head_dim = hidden_size // num_heads self.dropout = dropout self.config = config self.is_causal = True if self.head_dim * num_heads != self.hidden_size: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {num_heads}).') self.is_cross_attention = is_cross_attention self.q_proj = tf.keras.layers.Dense(num_heads * self.head_dim, use_bias=False, name='q_proj') self.k_proj = tf.keras.layers.Dense(num_heads * self.head_dim, use_bias=False, name='k_proj') self.v_proj = tf.keras.layers.Dense(num_heads * self.head_dim, use_bias=False, name='v_proj') self.o_proj = tf.keras.layers.Dense(hidden_size, 
use_bias=False, name='o_proj') self.rotary_emb = TFIdeficsEmbedding(self.head_dim, name='rotary_emb') self.qk_layer_norms = qk_layer_norms if self.qk_layer_norms: self.q_layer_norm = TFIdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps, name='q_layer_norm') self.k_layer_norm = TFIdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps, name='k_layer_norm') def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), perm=[0, 2, 1, 3]) def call(self, hidden_states: tf.Tensor, key_value_states: Optional[tf.Tensor]=None, attention_mask: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, past_key_value: Optional[Tuple[tf.Tensor]]=None, output_attentions: bool=False, use_cache: bool=False) -> Tuple[tf.Tensor, Optional[tf.Tensor], Optional[Tuple[tf.Tensor]]]: is_cross_attention = self.is_cross_attention or key_value_states is not None (bsz, q_len, _) = shape_list(hidden_states) query_states = self._shape(self.q_proj(hidden_states), q_len, bsz) if not is_cross_attention: key_states = self._shape(self.k_proj(hidden_states), q_len, bsz) value_states = self._shape(self.v_proj(hidden_states), q_len, bsz) else: (_, kv_len, _) = shape_list(key_value_states) key_states = self._shape(self.k_proj(key_value_states), kv_len, bsz) value_states = self._shape(self.v_proj(key_value_states), kv_len, bsz) kv_seq_len = shape_list(key_states)[-2] if past_key_value is not None: kv_seq_len += shape_list(past_key_value[0])[-2] if not is_cross_attention: if tf.is_tensor(kv_seq_len): seq_len = tf.maximum(kv_seq_len, q_len) else: seq_len = max(kv_seq_len, q_len) (cos, sin) = self.rotary_emb(value_states, seq_len) (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) if past_key_value is not None: key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) past_key_value = (key_states, value_states) if use_cache else None if self.qk_layer_norms: query_states = self.q_layer_norm(query_states) key_states = self.k_layer_norm(key_states) tf.debugging.assert_equal(tf.shape(attention_mask), [bsz, 1, q_len, kv_seq_len], message=f'Attention mask should be of size {[bsz, 1, q_len, kv_seq_len]}, but is {tf.shape(attention_mask)}') attn_output = scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, is_causal=self.is_causal and attention_mask is None and (q_len > 1)) tf.debugging.assert_equal(tf.shape(attn_output), [bsz, self.num_heads, q_len, self.head_dim], message=f'Attention weights should be of size {[bsz, self.num_heads, q_len, self.head_dim]}, but is {tf.shape(attn_output)}') attn_output = tf.reshape(tf.transpose(attn_output, perm=[0, 2, 1, 3]), (bsz, q_len, self.hidden_size)) attn_output = self.o_proj(attn_output) attn_weights = None if output_attentions: logger.warning_once('attn_weights are not extracted in scaled_dot_product_attention.
The model returns None instead') return (attn_output, attn_weights, past_key_value) def build(self, input_shape=None): if self.built: return self.built = True if self.is_cross_attention: kv_input_dim = self.hidden_size if not hasattr(self.config.vision_config, 'embed_dim') else self.config.vision_config.embed_dim else: kv_input_dim = self.hidden_size if getattr(self, 'o_proj', None) is not None: with tf.name_scope(self.o_proj.name): self.o_proj.build(self.num_heads * self.head_dim) if getattr(self, 'q_proj', None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build(self.hidden_size) if getattr(self, 'k_proj', None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build(kv_input_dim) if getattr(self, 'v_proj', None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build(kv_input_dim) if getattr(self, 'rotary_emb', None) is not None: with tf.name_scope(self.rotary_emb.name): self.rotary_emb.build(None) class TFIdeficsDecoderLayer(tf.keras.layers.Layer): def __init__(self, config: IdeficsConfig, **kwargs): super().__init__(**kwargs) self.hidden_size = config.hidden_size self.self_attn = TFIdeficsAttention(hidden_size=self.hidden_size, num_heads=config.num_attention_heads, dropout=config.dropout, config=config, name='self_attn') self.mlp = TFIdeficsMLP(hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, name='mlp') self.input_layernorm = TFIdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps, name='input_layernorm') self.post_attention_layernorm = TFIdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps, name='post_attention_layernorm') self.dropout = config.dropout def call(self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, past_key_value: Optional[Tuple[tf.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, training=False) -> Tuple[tf.Tensor, Optional[Tuple[tf.Tensor, tf.Tensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'self_attn', None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, 'mlp', None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, 'input_layernorm', None) is not None: with tf.name_scope(self.input_layernorm.name): self.input_layernorm.build(None) if getattr(self, 'post_attention_layernorm', None) is not None: with tf.name_scope(self.post_attention_layernorm.name): self.post_attention_layernorm.build(None) class TFIdeficsGatedCrossAttentionLayer(tf.keras.layers.Layer): def __init__(self, config: IdeficsConfig, **kwargs): super().__init__(**kwargs) 
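# --- Editor's note: illustrative sketch only (added by the editor, not part of the original file).
# It demonstrates a property of the module-level `rotate_half` / `apply_rotary_pos_emb` helpers defined
# earlier in this file: because each channel pair (x_i, x_{i+d/2}) is rotated by the same angle, the
# rotary embedding changes a query/key vector's direction but never its norm. The helper name
# `_rotary_norm_demo` is hypothetical.
def _rotary_norm_demo(dim=8, seq_len=5, base=10000.0):
    import tensorflow as tf
    inv_freq = 1.0 / base ** (tf.range(0, dim, 2, dtype=tf.float32) / dim)
    t = tf.range(seq_len, dtype=tf.float32)
    freqs = tf.einsum('i,j->ij', t, inv_freq)
    emb = tf.concat((freqs, freqs), axis=-1)                # (seq_len, dim), same layout as TFIdeficsEmbedding
    cos, sin = tf.cos(emb), tf.sin(emb)
    q = tf.random.normal((1, 1, seq_len, dim))              # (batch, heads, seq, head_dim)
    q1, q2 = q[..., : dim // 2], q[..., dim // 2 :]
    q_rot = q * cos + tf.concat((-q2, q1), axis=-1) * sin   # same rotation formula as apply_rotary_pos_emb
    tf.debugging.assert_near(tf.norm(q, axis=-1), tf.norm(q_rot, axis=-1), atol=1e-4)
    return q_rot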
self.hidden_size = config.hidden_size self.cross_attn = TFIdeficsAttention(hidden_size=self.hidden_size, num_heads=config.num_attention_heads, is_cross_attention=True, dropout=config.dropout, config=config, qk_layer_norms=config.qk_layer_norms, name='cross_attn') self.mlp = TFIdeficsMLP(hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, name='mlp') self.input_layernorm = TFIdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps, name='input_layernorm') self.post_attention_layernorm = TFIdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps, name='post_attention_layernorm') self.config = config.dropout self.act_cross_attn = tf.keras.activations.tanh self.act_dense = tf.keras.activations.tanh self.alpha_initializer = config.alpha_initializer self.alpha_type = config.alpha_type self.alphas_initializer_range = config.alphas_initializer_range def build(self, input_shape): if self.built: return self.built = True if self.alpha_initializer == 'zeros': if self.alpha_type == 'vector': self.alpha_cross_attn = self.add_weight(shape=(1, 1, self.hidden_size), initializer='zeros', trainable=True, name='alpha_cross_attn') self.alpha_dense = self.add_weight(shape=(1, 1, self.hidden_size), initializer='zeros', trainable=True, name='alpha_dense') elif self.alpha_type == 'float': self.alpha_cross_attn = self.add_weight(shape=(1,), initializer='zeros', trainable=True, name='alpha_cross_attn') self.alpha_dense = self.add_weight(shape=(1,), initializer='zeros', trainable=True, name='alpha_dense') else: raise ValueError(f'Unknown value for `alpha_type` ({self.alpha_type})') elif self.alpha_initializer == 'ones': if self.alpha_type == 'vector': self.alpha_cross_attn = self.add_weight(shape=(1, 1, self.hidden_size), initializer='ones', trainable=True, name='alpha_cross_attn') self.alpha_dense = self.add_weight(shape=(1, 1, self.hidden_size), initializer='ones', trainable=True, name='alpha_dense') elif self.alpha_type == 'float': self.alpha_cross_attn = self.add_weight(shape=(1,), initializer='ones', trainable=True, name='alpha_cross_attn') self.alpha_dense = self.add_weight(shape=(1,), initializer='ones', trainable=True, name='alpha_dense') else: raise ValueError(f'Unknown value for `alpha_type` ({self.alpha_type})') elif self.alpha_initializer in {'normal', 'gaussian', 'random'}: if self.alpha_type == 'vector': self.alpha_cross_attn = self.add_weight(shape=(1, 1, self.hidden_size), initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range), trainable=True, name='alpha_cross_attn') self.alpha_dense = self.add_weight(shape=(1, 1, self.hidden_size), initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range), trainable=True, name='alpha_dense') elif self.alpha_type == 'float': self.alpha_cross_attn = self.add_weight(shape=(1,), initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range), trainable=True, name='alpha_type') self.alpha_dense = self.add_weight(shape=(1,), initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range), trainable=True, name='alpha_dense') else: raise ValueError(f'Unknown value for `alpha_type` ({self.alpha_type})') else: raise NotImplementedError(f'Alpha initialization scheme {self.alpha_initializer} not yet implemented!') if not (hasattr(self, 'alpha_cross_attn') and hasattr(self, 'alpha_dense')): raise ValueError('Alpha parameters not initialized correctly!') with 
tf.name_scope(self.cross_attn.name): self.cross_attn.build(None) with tf.name_scope(self.mlp.name): self.mlp.build(None) with tf.name_scope(self.input_layernorm.name): self.input_layernorm.build(None) with tf.name_scope(self.post_attention_layernorm.name): self.post_attention_layernorm.build(None) super().build(input_shape) def call(self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor]=None, image_hidden_states: Optional[tf.Tensor]=None, image_attention_mask: Optional[tf.Tensor]=None, cross_attention_gate: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, past_key_value: Optional[Tuple[tf.Tensor]]=None) -> Tuple[tf.Tensor, Optional[Tuple[tf.Tensor, tf.Tensor]]]: if image_hidden_states is None: raise ValueError('`image_hidden_states` is required for Idefics cross attention module which are visual features to be conditioned on.') if cross_attention_gate is None: raise ValueError('`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images.') if past_key_value is not None: raise NotImplementedError('Past key value states are not implemented for Idefics cross attention module.') residual = hidden_states hidden_states = self.input_layernorm(hidden_states) (hidden_states, self_attn_weights, present_key_value) = self.cross_attn(hidden_states=hidden_states, key_value_states=image_hidden_states, attention_mask=image_attention_mask, output_attentions=output_attentions) hidden_states = tf.nn.dropout(hidden_states, rate=self.config) mask = tf.cast(cross_attention_gate == 0, dtype=hidden_states.dtype) mask = tf.expand_dims(mask, -1) hidden_states = tf.where(tf.broadcast_to(mask, tf.shape(hidden_states)) == 1, tf.zeros_like(hidden_states), hidden_states) hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = tf.nn.dropout(hidden_states, rate=self.config) hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs LLAMA_START_DOCSTRING = '\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a TensorFlow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) subclass.\n Use it as a regular TensorFlow Layer and refer to the TensorFlow documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`IdeficsConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. 
Check out the\n [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('The bare LLaMA Model outputting raw hidden-states without any specific head on top.', LLAMA_START_DOCSTRING) class TFIdeficsPreTrainedModel(TFPreTrainedModel): config_class = IdeficsConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['TFIdeficsDecoderLayer', 'TFIdeficsGatedCrossAttentionLayer'] LLAMA_INPUTS_DOCSTRING = "\n Args:\n input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)\n past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare LLaMA Model outputting raw hidden-states without any specific head on top.', LLAMA_START_DOCSTRING) @keras_serializable class TFIdeficsMainLayer(tf.keras.layers.Layer): config_class = IdeficsConfig def __init__(self, config: IdeficsConfig, add_pooling_year: bool=True, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = TFIdeficsDecoupledEmbedding(num_embeddings=config.vocab_size, num_additional_embeddings=config.additional_vocab_size, embedding_dim=config.hidden_size, partially_freeze=config.freeze_text_layers, name='embed_tokens') self.image_size = config.vision_config.image_size self.vision_config = config.vision_config self.vision_model = TFIdeficsVisionTransformer(config.vision_config, name='vision_model') if config.use_resampler: perceiver_config = config.perceiver_config self.perceiver_resampler = TFIdeficsPerceiverResampler(config, config.vision_config.embed_dim, perceiver_config.resampler_depth, perceiver_config.resampler_n_heads, perceiver_config.resampler_head_dim, perceiver_config.resampler_n_latents, name='perceiver_resampler') self.decoder_layers = [TFIdeficsDecoderLayer(config, name=f'layers.{i}') for i in range(config.num_hidden_layers)] self.cross_layer_interval = config.cross_layer_interval num_cross_layers = config.num_hidden_layers // self.cross_layer_interval self.gated_cross_attn_layers = [TFIdeficsGatedCrossAttentionLayer(config, name=f'gated_cross_attn_layers.{i}') for i in range(num_cross_layers)] self.gradient_checkpointing = False self.norm = TFIdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps, name='norm') self.gradient_checkpointing = False self.freeze_relevant_params(config) def freeze_relevant_params(self, config=None): if config is None: config = self.config if config.freeze_text_layers: self.freeze_text_layers(config.freeze_text_module_exceptions) if config.freeze_vision_layers: freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions) def freeze_text_layers(self, module_exceptions=[]): for module in [self.decoder_layers, self.norm]: freeze_model(module, module_exceptions=module_exceptions) def freeze_vision_layers(self, module_exceptions=[]): freeze_model(self.vision_model, module_exceptions=module_exceptions) def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): combined_attention_mask = None combined_attention_mask = _make_causal_mask(input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length) if attention_mask is not 
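# ---------------------------------------------------------------------------
# Editor-added, hedged usage sketch matching the docstrings above. The
# checkpoint id, prompt contents and generation settings are illustrative
# assumptions (TFIdeficsForVisionText2Text is defined further below in this
# file); downloading weights requires network access, and `from_pt=True` may
# be needed if only PyTorch weights are published for the checkpoint.
from PIL import Image
import numpy as np
from transformers import IdeficsProcessor, TFIdeficsForVisionText2Text

checkpoint = "HuggingFaceM4/idefics-9b"  # assumed checkpoint id
processor = IdeficsProcessor.from_pretrained(checkpoint)
model = TFIdeficsForVisionText2Text.from_pretrained(checkpoint)

image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # placeholder image
prompts = [["User: What is in this image?", image, "\nAssistant:"]]
inputs = processor(prompts, return_tensors="tf")
generated_ids = model.generate(**inputs, max_new_tokens=16)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))
# ---------------------------------------------------------------------------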
None: expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask return combined_attention_mask @unpack_inputs @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) def call(self, input_ids: TFModelInputType | None=None, attention_mask: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, past_key_values: Optional[List[tf.Tensor]]=None, inputs_embeds: Optional[tf.Tensor]=None, pixel_values: Optional[tf.Tensor]=None, image_encoder_embeddings: Optional[tf.Tensor]=None, perceiver_embeddings: Optional[tf.Tensor]=None, image_attention_mask: Optional[tf.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[TFIdeficsBaseModelOutputWithPast, Tuple[tf.Tensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time') elif input_ids is not None: (batch_size, seq_length) = shape_list(input_ids) elif inputs_embeds is not None: (batch_size, seq_length, _) = shape_list(inputs_embeds) else: raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds') seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = shape_list(past_key_values[0][0])[2] seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is not None and position_ids is None: position_ids = tf.math.cumsum(tf.cast(attention_mask, dtype=tf.int32), axis=-1) - 1 position_ids = tf.where(attention_mask == 0, 1, position_ids) elif position_ids is None: position_ids = tf.range(past_key_values_length, seq_length + past_key_values_length, dtype=tf.int32) position_ids = tf.expand_dims(position_ids, 0) no_images = False if sum((int(pixel_values is None), int(image_encoder_embeddings is None), int(perceiver_embeddings is None))) != 2: raise ValueError('Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None.') elif pixel_values is not None: no_images = tf.reduce_sum(tf.cast(pixel_values, dtype=tf.int32)) == 0 pixel_values = tf.cast(pixel_values, dtype=self.dtype) if len(pixel_values.shape) == 4: batch_size = shape_list(pixel_values)[0] num_images = shape_list(pixel_values)[0] elif len(pixel_values.shape) == 5: (batch_size, num_images) = shape_list(pixel_values)[:2] pixel_values = tf.reshape(pixel_values, [batch_size * num_images, *pixel_values.shape[2:]]) image_hidden_states = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state elif image_encoder_embeddings is not None: (batch_size, num_images, image_seq_len, image_hidden_size) = shape_list(image_encoder_embeddings) image_hidden_states = tf.cast(image_encoder_embeddings, dtype=self.dtype) image_hidden_states = 
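# ---------------------------------------------------------------------------
# Editor-added sketch of the position-id derivation above: cumulative-summing
# the attention mask gives 0-based positions for real tokens, and padded
# positions are overwritten with a dummy value of 1. NumPy equivalent:
import numpy as np

attention_mask = np.array([[0, 0, 1, 1, 1]])             # left-padded sequence
position_ids = np.cumsum(attention_mask, axis=-1) - 1    # [[-1 -1  0  1  2]]
position_ids = np.where(attention_mask == 0, 1, position_ids)
print(position_ids)                                       # [[1 1 0 1 2]]
# ---------------------------------------------------------------------------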
tf.reshape(image_hidden_states, (batch_size * num_images, image_seq_len, image_hidden_size)) if self.config.use_resampler: if perceiver_embeddings is None: perceiver_embeddings = self.perceiver_resampler(image_hidden_states) (image_seq_len, image_hidden_size) = shape_list(perceiver_embeddings)[1:3] else: (batch_size, num_images, image_seq_len, image_hidden_size) = shape_list(perceiver_embeddings) image_hidden_states = perceiver_embeddings elif perceiver_embeddings is None: (image_seq_len, image_hidden_size) = shape_list(image_hidden_states)[1:3] else: raise ValueError('If `perceiver_embeddings` are passed, use_resampler should be True') image_hidden_states = tf.reshape(image_hidden_states, (batch_size, num_images * image_seq_len, image_hidden_size)) if pixel_values is not None and len(pixel_values.shape) == 4 and (image_attention_mask is None): image_attention_mask = tf.zeros((batch_size, seq_length, 1), dtype=tf.int32) text_seq_len = shape_list(image_attention_mask)[1] image_attention_mask = tf.expand_dims(image_attention_mask, -1) image_attention_mask = tf.repeat(image_attention_mask, repeats=image_seq_len) image_attention_mask = tf.reshape(image_attention_mask, (batch_size, text_seq_len, num_images * image_seq_len)) if image_hidden_states is not None: (image_batch_size, image_sequence_length, _) = shape_list(image_hidden_states) image_hidden_shape = (image_batch_size, image_sequence_length) if image_attention_mask is None: image_attention_mask = tf.ones(image_hidden_shape, dtype=tf.int32) image_attention_mask = invert_attention_mask(image_attention_mask) else: image_attention_mask = None cross_attention_gate = tf.squeeze(tf.cast(tf.reduce_any(image_attention_mask == 0, axis=-1), dtype=self.dtype), axis=1) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if attention_mask is None: attention_mask = tf.ones((batch_size, seq_length_with_past), dtype=tf.bool) attention_mask = self._prepare_decoder_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length) hidden_states = inputs_embeds if self.gradient_checkpointing and training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
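# ---------------------------------------------------------------------------
# Editor-added sketch: the expand/repeat/reshape above turns a per-image mask
# of shape (batch, text_seq_len, num_images) into a per-image-token mask of
# shape (batch, text_seq_len, num_images * image_seq_len) by repeating each
# image entry image_seq_len times. NumPy equivalent of the intent:
import numpy as np

batch, text_seq_len, num_images, image_seq_len = 1, 3, 2, 4
image_attention_mask = np.array([[[1, 0], [1, 0], [0, 1]]])   # which image each token may attend to
expanded = np.repeat(image_attention_mask[..., None], image_seq_len, axis=-1)
expanded = expanded.reshape(batch, text_seq_len, num_images * image_seq_len)
print(expanded.shape)   # (1, 3, 8)
print(expanded[0, 2])   # [0 0 0 0 1 1 1 1] -> third token attends only to image 1's tokens
# ---------------------------------------------------------------------------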
Setting `use_cache=False`...') use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None for (idx, decoder_layer) in enumerate(self.decoder_layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None def vblock(main_block, hidden_states, attention_mask, position_ids, past_key_value, image_hidden_states, image_attention_mask, cross_attention_gate, output_attentions, use_cache, layer_idx, cross_layer_interval, gated_cross_attn_layers): if layer_idx % cross_layer_interval == 0: xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval] outputs = xblock(hidden_states, attention_mask=attention_mask, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, cross_attention_gate=cross_attention_gate, output_attentions=output_attentions, use_cache=use_cache, past_key_value=None) hidden_states = outputs[0] layer_outputs = main_block(hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) return layer_outputs if self.gradient_checkpointing and training: past_key_value = None if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False layer_outputs = tf.recompute_grad(vblock, decoder_layer, hidden_states, attention_mask, position_ids, past_key_value, image_hidden_states, image_attention_mask, output_attentions, use_cache, no_images, idx, self.cross_layer_interval, self.gated_cross_attn_layers) else: layer_outputs = vblock(decoder_layer, hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, cross_attention_gate=cross_attention_gate, output_attentions=output_attentions, use_cache=use_cache, layer_idx=idx, cross_layer_interval=self.cross_layer_interval, gated_cross_attn_layers=self.gated_cross_attn_layers) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None image_hidden_states = tf.reshape(image_hidden_states, (batch_size, num_images, image_seq_len, image_hidden_size)) if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states] if v is not None)) return TFIdeficsBaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, image_hidden_states=image_hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embed_tokens', None) is not None: with tf.name_scope(self.embed_tokens.name): self.embed_tokens.build(None) if getattr(self, 'vision_model', None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, 'norm', None) is not None: with tf.name_scope(self.norm.name): self.norm.build(None) if getattr(self, 'perceiver_resampler', None) is not None: with tf.name_scope(self.perceiver_resampler.name): 
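# ---------------------------------------------------------------------------
# Editor-added sketch of the layer schedule implemented by vblock() above: a
# gated cross-attention block runs before every `cross_layer_interval`-th
# decoder layer. For example, with 8 decoder layers and an interval of 4:
num_hidden_layers, cross_layer_interval = 8, 4
for layer_idx in range(num_hidden_layers):
    if layer_idx % cross_layer_interval == 0:
        xblock_idx = layer_idx // cross_layer_interval
        print(f"layer {layer_idx}: gated_cross_attn_layers[{xblock_idx}] -> decoder layer {layer_idx}")
    else:
        print(f"layer {layer_idx}: decoder layer {layer_idx} only")
# ---------------------------------------------------------------------------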
self.perceiver_resampler.build(None) if getattr(self, 'decoder_layers', None) is not None: for layer in self.decoder_layers: with tf.name_scope(layer.name): layer.build(None) if getattr(self, 'gated_cross_attn_layers', None) is not None: for layer in self.gated_cross_attn_layers: with tf.name_scope(layer.name): layer.build(None) class TFIdeficsModel(TFIdeficsPreTrainedModel): def __init__(self, config: IdeficsConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFIdeficsMainLayer(config, name='model') def call(self, input_ids: TFModelInputType | None=None, attention_mask: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, past_key_values: Optional[List[tf.Tensor]]=None, inputs_embeds: Optional[tf.Tensor]=None, pixel_values: Optional[tf.Tensor]=None, image_encoder_embeddings: Optional[tf.Tensor]=None, perceiver_embeddings: Optional[tf.Tensor]=None, image_attention_mask: Optional[tf.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[TFIdeficsBaseModelOutputWithPast, Tuple[tf.Tensor]]: outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, image_encoder_embeddings=image_encoder_embeddings, perceiver_embeddings=perceiver_embeddings, image_attention_mask=image_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'model', None) is not None: with tf.name_scope(self.model.name): self.model.build(None) class TFIdeficsForVisionText2Text(TFPreTrainedModel, TFCausalLanguageModelingLoss): _keys_to_ignore_on_load_missing = ['lm_head.weight'] _tied_weights_keys = ['model.embed_tokens.weight', 'lm_head.weight'] config_class = IdeficsConfig def __init__(self, config, vision_model=None, **kwargs): super().__init__(config, **kwargs) self.model = TFIdeficsMainLayer(config, name='model') self.lm_head = TFIdeficsDecoupledLinear(config.hidden_size, config.vocab_size, config.additional_vocab_size, bias=False, partially_freeze=config.freeze_lm_head, name='lm_head') def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model def tie_weights(self): output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() if getattr(self.config, 'tie_word_embeddings', True): output_embeddings.weight = input_embeddings.weight if input_embeddings.num_additional_embeddings > 0: assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight if hasattr(output_embeddings, 'out_features') and hasattr(input_embeddings, 'num_embeddings'): output_embeddings.out_features = input_embeddings.num_embeddings if hasattr(output_embeddings, 'out_additional_features') 
and hasattr(input_embeddings, 'num_additional_embeddings'): output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings @unpack_inputs @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFIdeficsCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def call(self, input_ids: TFModelInputType | None=None, attention_mask: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, past_key_values: Optional[List[tf.Tensor]]=None, inputs_embeds: Optional[tf.Tensor]=None, pixel_values: Optional[tf.Tensor]=None, image_encoder_embeddings: Optional[tf.Tensor]=None, perceiver_embeddings: Optional[tf.Tensor]=None, image_attention_mask: Optional[tf.Tensor]=None, labels: Optional[tf.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None, training=False) -> Union[TFIdeficsCausalLMOutputWithPast, Tuple[tf.Tensor]]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, image_encoder_embeddings=image_encoder_embeddings, perceiver_embeddings=perceiver_embeddings, image_attention_mask=image_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: if attention_mask is not None: shift_attention_mask = attention_mask[..., 1:] shift_logits = logits[..., :-1, :][shift_attention_mask != 0] shift_labels = labels[..., 1:][shift_attention_mask != 0] else: shift_logits = logits[..., :-1, :] shift_labels = labels[..., 1:] loss = self.hf_compute_loss(labels=tf.reshape(shift_labels, [-1]), logits=tf.reshape(shift_logits, [-1, shift_logits.shape[-1]])) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TFIdeficsCausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states) def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): image_hidden_states = kwargs.pop('image_hidden_states', None) if image_hidden_states is not None: if self.config.use_resampler: kwargs['perceiver_embeddings'] = image_hidden_states else: kwargs['image_encoder_embeddings'] = image_hidden_states kwargs['pixel_values'] = None inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs) unwanted_kwargs = ['token_type_ids'] for kwarg in unwanted_kwargs: inputs.pop(kwarg, None) return inputs @staticmethod def _expand_inputs_for_generation(*args, **model_kwargs): return expand_inputs_for_generation(*args, **model_kwargs) @staticmethod def _update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder): return update_model_kwargs_for_generation(outputs, model_kwargs) @staticmethod def 
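# ---------------------------------------------------------------------------
# Editor-added sketch of the causal-LM label shifting used in call() above:
# position i predicts token i+1, and padded target positions are dropped via
# the shifted attention mask before the loss is computed. NumPy equivalent:
import numpy as np

logits = np.random.randn(1, 5, 10)                 # (batch, seq, vocab)
labels = np.array([[3, 7, 2, 9, 1]])
attention_mask = np.array([[1, 1, 1, 1, 0]])       # last position is padding
shift_attention_mask = attention_mask[..., 1:]
shift_logits = logits[..., :-1, :][shift_attention_mask != 0]   # predictions for kept positions
shift_labels = labels[..., 1:][shift_attention_mask != 0]       # next-token targets
print(shift_logits.shape, shift_labels.shape)       # (3, 10) (3,)
# ---------------------------------------------------------------------------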
_reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple((tf.gather(past_state, beam_idx) for past_state in layer_past)),) return reordered_past def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'model', None) is not None: with tf.name_scope(self.model.name): self.model.build(None) if getattr(self, 'lm_head', None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) # File: transformers-main/src/transformers/models/idefics/perceiver.py """""" from typing import Optional, Tuple import torch import torch.nn as nn from .configuration_idefics import IdeficsConfig class IdeficsPerceiverResampler(nn.Module): def __init__(self, config: IdeficsConfig, embed_dim: int, depth: int, n_heads: int, head_dim: int, n_latents: int) -> None: super().__init__() (self.embed_dim, self.n_heads, self.head_dim, self.n_latents) = (embed_dim, n_heads, head_dim, n_latents) self.qk_layer_norms = config.perceiver_config.qk_layer_norms_perceiver self.latents = nn.Parameter(torch.randn(self.n_latents, self.embed_dim), requires_grad=True) self.intermediate_dim = self.embed_dim * 4 if not hasattr(config.vision_config, 'embed_dim') else config.vision_config.embed_dim * 4 self.blocks = nn.ModuleList([nn.ModuleList([IdeficsPerceiverAttention(self.embed_dim, self.n_heads, self.head_dim, self.qk_layer_norms), IdeficsMLP(self.intermediate_dim, config)]) for _ in range(depth)]) self.layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, context: torch.Tensor) -> torch.Tensor: latents = self.latents.repeat(context.shape[0], 1, 1) for (attn, ff) in self.blocks: latents = attn(context, latents) + latents latents = ff(latents) + latents return self.layer_norm(latents) class IdeficsPerceiverAttention(nn.Module): def __init__(self, embed_dim: int, n_heads: int, head_dim: int, qk_layer_norms: bool) -> None: super().__init__() (self.embed_dim, self.n_heads, self.head_dim) = (embed_dim, n_heads, head_dim) self.qk_layer_norms = qk_layer_norms self.context_layer_norm = nn.LayerNorm(self.embed_dim) self.latents_layer_norm = nn.LayerNorm(self.embed_dim) if self.qk_layer_norms: self.q_layer_norm = nn.LayerNorm(self.head_dim) self.k_layer_norm = nn.LayerNorm(self.head_dim) self.qk_scale = self.head_dim ** (-0.5) self.q_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False) self.output_proj = nn.Linear(self.n_heads * self.head_dim, embed_dim, bias=False) def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor: context = self.context_layer_norm(context) latents = self.latents_layer_norm(latents) (batch_size, seq_length, embed_dim) = context.shape[:3] q = self.q_proj(latents) k = self.k_proj(torch.cat([context, latents], dim=-2)) v = self.v_proj(torch.cat([context, latents], dim=-2)) (q, k, v) = [x.reshape(batch_size, x.shape[1], self.n_heads, self.head_dim).transpose(1, 2) for x in (q, k, v)] if self.qk_layer_norms: q = self.q_layer_norm(q) k = self.k_layer_norm(k) scores = torch.einsum('... i d, ... j d -> ... i j', q * self.qk_scale, k) stabilized_scores = scores - scores.amax(dim=-1, keepdim=True).detach() attn = stabilized_scores.softmax(dim=-1) resampled = torch.einsum('... i j, ... j d -> ... 
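# ---------------------------------------------------------------------------
# Editor-added sketch of the Perceiver resampling idea implemented above: a
# small set of learned latents cross-attends to the (long) sequence of visual
# features concatenated with the latents themselves, so downstream layers see
# a fixed number of image tokens regardless of input length. Single-head
# NumPy version (the real module is multi-head with layer norms):
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

rng = np.random.default_rng(0)
n_latents, seq_len, dim = 8, 197, 16
latents = rng.normal(size=(n_latents, dim))        # learned queries
context = rng.normal(size=(seq_len, dim))          # visual features
kv = np.concatenate([context, latents], axis=0)    # keys/values: [context; latents]
scores = (latents @ kv.T) / np.sqrt(dim)
resampled = softmax(scores) @ kv
print(resampled.shape)                              # (8, 16): 197 tokens -> 8 latents
# ---------------------------------------------------------------------------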
i d', attn, v) return self.output_proj(resampled.transpose(1, 2).flatten(-2)) class IdeficsMLP(nn.Module): def __init__(self, intermediate_size, config: IdeficsConfig): super().__init__() self.embed_dim = config.vision_config.embed_dim self.ln = nn.LayerNorm(self.embed_dim) self.fc = nn.Linear(self.embed_dim, intermediate_size, bias=False) self.act = nn.ReLU() self.c_proj = nn.Linear(intermediate_size, self.embed_dim, bias=False) def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.ln(hidden_states) hidden_states = self.fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) return hidden_states # File: transformers-main/src/transformers/models/idefics/perceiver_tf.py """""" from typing import Optional, Tuple import tensorflow as tf from ...modeling_tf_utils import shape_list from .configuration_idefics import IdeficsConfig class TFIdeficsPerceiverResampler(tf.keras.layers.Layer): def __init__(self, config: IdeficsConfig, embed_dim: int, depth: int, n_heads: int, head_dim: int, n_latents: int, **kwargs) -> None: super().__init__(**kwargs) (self.embed_dim, self.n_heads, self.head_dim, self.n_latents) = (embed_dim, n_heads, head_dim, n_latents) self.qk_layer_norms = config.perceiver_config.qk_layer_norms_perceiver self.intermediate_dim = self.embed_dim * 4 if not hasattr(config.vision_config, 'embed_dim') else config.vision_config.embed_dim * 4 self.blocks = [] for i in range(depth): self.blocks.append([TFIdeficsPerceiverAttention(self.embed_dim, self.n_heads, self.head_dim, self.qk_layer_norms, name=f'blocks.{i}.0'), TFIdeficsMLP(self.intermediate_dim, config, name=f'blocks.{i}.1')]) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-05, name='layer_norm') def build(self, input_shape): self.latents = self.add_weight(shape=(self.n_latents, self.embed_dim), initializer='random_normal', trainable=True, name='latents') super().build(input_shape) def call(self, context: tf.Tensor) -> tf.Tensor: latents = tf.expand_dims(self.latents, axis=0) latents = tf.tile(latents, [tf.shape(context)[0], 1, 1]) for (attn, ff) in self.blocks: latents = attn(context, latents) + latents latents = ff(latents) + latents return self.layer_norm(latents) class TFIdeficsPerceiverAttention(tf.keras.layers.Layer): def __init__(self, embed_dim: int, n_heads: int, head_dim: int, qk_layer_norms: bool, **kwargs) -> None: super().__init__(**kwargs) (self.embed_dim, self.n_heads, self.head_dim) = (embed_dim, n_heads, head_dim) self.qk_layer_norms = qk_layer_norms self.context_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-05, name='context_layer_norm') self.latents_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-05, name='latents_layer_norm') if self.qk_layer_norms: self.q_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-05, name='q_layer_norm') self.k_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-05, name='k_layer_norm') self.qk_scale = self.head_dim ** (-0.5) self.q_proj = tf.keras.layers.Dense(self.n_heads * self.head_dim, use_bias=False, name='q_proj') self.k_proj = tf.keras.layers.Dense(self.n_heads * self.head_dim, use_bias=False, name='k_proj') self.v_proj = tf.keras.layers.Dense(self.n_heads * self.head_dim, use_bias=False, name='v_proj') self.output_proj = tf.keras.layers.Dense(embed_dim, use_bias=False, name='output_proj') def call(self, context: tf.Tensor, latents: tf.Tensor) -> tf.Tensor: context = self.context_layer_norm(context) latents = 
self.latents_layer_norm(latents) (batch_size, seq_length, embed_dim) = shape_list(context) q = self.q_proj(latents) k = self.k_proj(tf.concat([context, latents], axis=-2)) v = self.v_proj(tf.concat([context, latents], axis=-2)) (q, k, v) = [tf.transpose(tf.reshape(x, (batch_size, x.shape[1], self.n_heads, self.head_dim)), perm=[0, 2, 1, 3]) for x in (q, k, v)] if self.qk_layer_norms: q = self.q_layer_norm(q) k = self.k_layer_norm(k) scores = tf.einsum('... i d, ... j d -> ... i j', q * self.qk_scale, k) stabilized_scores = scores - tf.reduce_max(scores, axis=-1, keepdims=True) attn = tf.nn.softmax(stabilized_scores, axis=-1) resampled = tf.einsum('... i j, ... j d -> ... i d', attn, v) return self.output_proj(tf.reshape(tf.transpose(resampled, perm=[0, 2, 1, 3]), (batch_size, -1, self.n_heads * self.head_dim))) class TFIdeficsMLP(tf.keras.layers.Layer): def __init__(self, intermediate_size, config: IdeficsConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.vision_config.embed_dim self.ln = tf.keras.layers.LayerNormalization(epsilon=1e-05, name='ln') self.fc = tf.keras.layers.Dense(intermediate_size, use_bias=False, name='fc') self.act = tf.keras.layers.ReLU(name='act') self.c_proj = tf.keras.layers.Dense(self.embed_dim, use_bias=False, name='c_proj') def call(self, hidden_states: Optional[Tuple[tf.Tensor]]) -> tf.Tensor: hidden_states = self.ln(hidden_states) hidden_states = self.fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) return hidden_states # File: transformers-main/src/transformers/models/idefics/processing_idefics.py """""" from typing import Callable, List, Optional, Union from urllib.parse import urlparse from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy from ...utils import is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf IMAGE_TOKEN = '' def incremental_to_binary_attention_mask(incremental_mask, return_tensors, num_classes=-1): if num_classes != -1: if return_tensors == 'pt': incremental_mask[incremental_mask >= num_classes] = -1 elif return_tensors == 'tf': incremental_mask = tf.where(incremental_mask >= num_classes, -1, incremental_mask) if return_tensors == 'pt': negatives = incremental_mask == -1 incremental_mask[negatives] = 0 attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes) attn_mask[negatives, :] = 0 elif return_tensors == 'tf': negatives = tf.equal(incremental_mask, -1) incremental_mask = tf.where(negatives, 0, incremental_mask) attn_mask = tf.one_hot(incremental_mask, depth=num_classes) negatives_expanded = tf.expand_dims(negatives, -1) attn_mask = tf.where(negatives_expanded, tf.zeros_like(attn_mask), attn_mask) return attn_mask def image_attention_mask_for_packed_input_ids(input_ids, tokenizer, return_tensors): if return_tensors == 'pt': return image_attention_mask_for_packed_input_ids_pt(input_ids, tokenizer) elif return_tensors == 'tf': return image_attention_mask_for_packed_input_ids_tf(input_ids, tokenizer) def image_attention_mask_for_packed_input_ids_pt(input_ids, tokenizer): image_attention_mask = torch.full_like(input_ids, fill_value=-1) next_image_attention_mask = torch.full_like(input_ids, fill_value=-1) image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) eod_token_id = tokenizer.eos_token_id for batch_idx in 
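# ---------------------------------------------------------------------------
# Editor-added sketch of incremental_to_binary_attention_mask above: the
# incremental mask stores, for each text position, the index of the image it
# should attend to (-1 for none), and is converted into a one-hot mask over
# the padded image slots. NumPy equivalent of the torch path:
import numpy as np

incremental = np.array([[-1, 0, 0, 1, 1]])          # image index per text token
num_images = 2
binary = np.zeros((*incremental.shape, num_images), dtype=np.int64)
valid = incremental >= 0
binary[valid, incremental[valid]] = 1
print(binary[0])
# [[0 0]
#  [1 0]
#  [1 0]
#  [0 1]
#  [0 1]]
# ---------------------------------------------------------------------------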
range(input_ids.size(0)): count = -1 seen_eod = False for (idx, token_id) in enumerate(input_ids[batch_idx]): if token_id == image_token_id: count += 1 image_attention_mask[batch_idx][idx] = count seen_eod = False else: image_attention_mask[batch_idx][idx] = count if seen_eod: image_attention_mask[batch_idx][idx] = -1 if token_id == eod_token_id: seen_eod = True for batch_idx in range(input_ids.size(0)): count = -1 seen_eod = False for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1): token_id = input_ids[batch_idx][idx] if token_id == image_token_id: count += 1 next_image_attention_mask[batch_idx][idx] = count seen_eod = False else: next_image_attention_mask[batch_idx][idx] = count if token_id == eod_token_id: seen_eod = True if seen_eod: next_image_attention_mask[batch_idx][idx] = -1 non_negative_indices = next_image_attention_mask[batch_idx] != -1 next_image_attention_mask[batch_idx][non_negative_indices] -= count next_image_attention_mask[batch_idx][non_negative_indices] *= -1 return (image_attention_mask, next_image_attention_mask) def image_attention_mask_for_packed_input_ids_tf(input_ids, tokenizer): image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) eod_token_id = tokenizer.eos_token_id batch_size = tf.shape(input_ids)[0] image_attention_mask = tf.fill(tf.shape(input_ids), -1) next_image_attention_mask = tf.fill(tf.shape(input_ids), -1) for batch_idx in range(batch_size): count = -1 seen_eod = False seq_length = tf.shape(input_ids)[1] for idx in range(seq_length - 1, -1, -1): token_id = input_ids[batch_idx, idx].numpy() if token_id == image_token_id: count += 1 indices = [[batch_idx, idx]] updates = [count] image_attention_mask = tf.tensor_scatter_nd_update(image_attention_mask, indices, updates) next_image_attention_mask = tf.tensor_scatter_nd_update(next_image_attention_mask, indices, updates) elif token_id == eod_token_id and (not seen_eod): seen_eod = True count = 0 indices = [[batch_idx, idx]] updates = [count] next_image_attention_mask = tf.tensor_scatter_nd_update(next_image_attention_mask, indices, updates) if seen_eod and token_id != eod_token_id: indices = [[batch_idx, idx]] updates = [-1] next_image_attention_mask = tf.tensor_scatter_nd_update(next_image_attention_mask, indices, updates) return (image_attention_mask, next_image_attention_mask) def is_url(string): if ' ' in string: return False result = urlparse(string) return all([result.scheme, result.netloc]) class IdeficsProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] valid_kwargs = ['image_size', 'add_end_of_utterance_token'] image_processor_class = 'IdeficsImageProcessor' tokenizer_class = 'LlamaTokenizerFast' def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs): if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) self.default_image_dims = (self.image_processor.image_num_channels, self.image_processor.image_size, self.image_processor.image_size) self.tokenizer_was_trained_with_end_of_utterance_token = True if '' in self.tokenizer.special_tokens_map.get('additional_special_tokens', []) else False def __call__(self, prompts: Union[List[TextInput], List[List[TextInput]]], padding: Union[bool, str, PaddingStrategy]='longest', truncation: Union[bool, str, 
TruncationStrategy]=None, max_length: Optional[int]=None, transform: Callable=None, add_eos_token=False, add_end_of_utterance_token=None, debug=False, return_tensors='pt') -> BatchEncoding: if add_end_of_utterance_token is None: add_end_of_utterance_token = self.tokenizer_was_trained_with_end_of_utterance_token if not any((isinstance(i, list) for i in prompts)): prompts = [prompts] fake_token = '' image_token = '' end_of_utterance_token = '' def image_tokens(last_was_image): if last_was_image: return image_token + fake_token else: return fake_token + image_token + fake_token all_prompts = [] all_images = [] for sample in prompts: full_text = f'{self.tokenizer.bos_token}' image_objects = [] last_was_image = False last_was_text = False for (i, item) in enumerate(sample): if i > 0: last_was_text = True if not last_was_image else False if isinstance(item, str): item = item.strip(' ') if is_url(item): image = self.image_processor.fetch_images(item) full_text += image_tokens(last_was_image) image_objects.append(image) last_was_image = True else: if add_end_of_utterance_token and last_was_text: full_text += end_of_utterance_token full_text += item last_was_image = False else: full_text += image_tokens(last_was_image) image_objects.append(item) last_was_image = True if add_eos_token: full_text += self.tokenizer.eos_token if debug is True: print(f'full_text={full_text!r}') image_objects = self.image_processor(image_objects, transform=transform, return_tensors=return_tensors) all_prompts.append(full_text) all_images.append(image_objects) text_encoding = self.tokenizer(text=all_prompts, add_special_tokens=False, padding=padding, truncation=truncation, max_length=max_length) all_texts = text_encoding['input_ids'] all_attention_masks = text_encoding['attention_mask'] max_num_images = max((len(x) for x in all_images)) max_num_images = max(1, max_num_images) at_least_one_image = sum((len(x) for x in all_images)) > 0 output_input_ids = [] output_images = [] output_attention_masks = [] for (text, attention_mask, images) in zip(all_texts, all_attention_masks, all_images): padded_input_ids = text image_count = padded_input_ids.count(self.image_token_id) local_max_num_images = min(image_count, max_num_images) current_images = images[:local_max_num_images] if len(current_images) > 0: if return_tensors == 'pt': padded_image_tensor = torch.zeros(max_num_images, *current_images.size()[1:]) padded_image_tensor[:current_images.size(0)] = current_images elif return_tensors == 'tf': image_shape = tf.shape(current_images)[1:] padded_shape = tf.concat([[max_num_images], image_shape], axis=0) padded_image_tensor = tf.zeros(padded_shape, dtype=current_images.dtype) num_images = tf.shape(current_images)[0] indices = tf.reshape(tf.range(num_images), (-1, 1)) updates = current_images padded_image_tensor = tf.tensor_scatter_nd_update(padded_image_tensor, indices, updates) elif return_tensors == 'pt': padded_image_tensor = torch.zeros(max_num_images, *self.default_image_dims) elif return_tensors == 'tf': padded_image_tensor = tf.zeros((max_num_images, *self.default_image_dims)) output_images.append(padded_image_tensor) if return_tensors == 'pt': output_input_ids.append(torch.tensor(padded_input_ids)) output_attention_masks.append(torch.tensor(attention_mask)) elif return_tensors == 'tf': output_input_ids.append(tf.convert_to_tensor(padded_input_ids, dtype=tf.int32)) output_attention_masks.append(attention_mask) if return_tensors == 'pt': output_input_ids = torch.stack(output_input_ids) output_images = 
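# ---------------------------------------------------------------------------
# Editor-added sketch of the prompt assembly in IdeficsProcessor.__call__
# above. The empty-string constants (IMAGE_TOKEN, fake_token, image_token,
# end_of_utterance_token) appear to be angle-bracket special tokens that were
# stripped during extraction; in the upstream processor they are "<image>",
# "<fake_token_around_image>" and "<end_of_utterance>". Interleaving rule:
FAKE = "<fake_token_around_image>"
IMAGE = "<image>"

def image_tokens(last_was_image: bool) -> str:
    return IMAGE + FAKE if last_was_image else FAKE + IMAGE + FAKE

prompt = "<s>" + "User:" + image_tokens(False) + image_tokens(True) + "Describe both images."
print(prompt)
# <s>User:<fake_token_around_image><image><fake_token_around_image><image><fake_token_around_image>Describe both images.
# ---------------------------------------------------------------------------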
torch.stack(output_images) output_attention_masks = torch.stack(output_attention_masks) elif return_tensors == 'tf': output_input_ids = tf.stack(output_input_ids) output_images = tf.stack(output_images) output_attention_masks = tf.stack(output_attention_masks) if at_least_one_image: (image_attention_mask, _) = image_attention_mask_for_packed_input_ids(output_input_ids, self.tokenizer, return_tensors) image_attention_mask = incremental_to_binary_attention_mask(image_attention_mask, return_tensors, num_classes=max_num_images) elif return_tensors == 'pt': image_attention_mask = torch.zeros(output_input_ids.shape[0], output_input_ids.shape[1], 1, dtype=torch.bool) elif return_tensors == 'tf': image_attention_mask = tf.zeros((output_input_ids.shape[0], output_input_ids.shape[1], 1), dtype=tf.bool) return BatchFeature(data={'input_ids': output_input_ids, 'attention_mask': output_attention_masks, 'pixel_values': output_images, 'image_attention_mask': image_attention_mask}) def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) # File: transformers-main/src/transformers/models/idefics/vision.py """""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...utils import ModelOutput, logging from .configuration_idefics import IdeficsVisionConfig logger = logging.get_logger(__name__) @dataclass class IdeficsVisionModelOutput(ModelOutput): image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None class IdeficsVisionEmbeddings(nn.Module): def __init__(self, config: IdeficsVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] - 1 pos_embed = self.position_embedding(self.position_ids) num_positions = pos_embed.shape[1] - 1 if num_patches == num_positions and height == width: return pos_embed class_pos_embed = pos_embed[:, 0] patch_pos_embed = pos_embed[:, 1:] embed_dim = embeddings.shape[-1] num_h_patches = height // self.config.patch_size num_w_patches = width // self.config.patch_size (num_h_patches, num_w_patches) = (num_h_patches + 0.1, num_w_patches + 0.1) sqrt_num_positions = math.sqrt(num_positions) patch_pos_embed = 
patch_pos_embed.reshape(1, int(sqrt_num_positions), int(sqrt_num_positions), embed_dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) fp32_upcasting = patch_pos_embed.dtype == torch.bfloat16 if fp32_upcasting: logger.warning_once("Upcasting patch_pos_embed to fp32 for interpolation since `upsample_bicubic2d_out_frame` in nn.functional.interpolate is not implemented for 'torch.bfloat16' dtype. This will result in a slight overhead.") patch_pos_embed = patch_pos_embed.to(torch.float) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, scale_factor=(num_h_patches / sqrt_num_positions, num_w_patches / sqrt_num_positions), mode='bicubic', align_corners=False) if fp32_upcasting: patch_pos_embed = patch_pos_embed.to(torch.bfloat16) if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]: raise ValueError(f"Number of patches for images ({(int(num_h_patches), int(num_w_patches))}) don't match the shape of position embedding ({(patch_pos_embed.shape[-2], patch_pos_embed.shape[-1])})") patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, embed_dim) return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: (batch_size, num_channels, height, width) = pixel_values.shape if not interpolate_pos_encoding: if height != self.image_size or width != self.image_size: raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size}). You should try to set `interpolate_pos_encoding=True`") target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class IdeficsVisionAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: (bsz, tgt_len, embed_dim) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = 
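# ---------------------------------------------------------------------------
# Editor-added sketch of interpolate_pos_encoding above: the learned patch
# position embeddings form a square grid (e.g. 16x16 for a 224px checkpoint
# with 14px patches) and are bicubically resized so the checkpoint can run at
# a different resolution. Standalone PyTorch version of the core step:
import torch
import torch.nn.functional as F

old_grid, new_grid, embed_dim = 16, 24, 32
patch_pos_embed = torch.randn(1, old_grid * old_grid, embed_dim)
grid = patch_pos_embed.reshape(1, old_grid, old_grid, embed_dim).permute(0, 3, 1, 2)
resized = F.interpolate(grid, size=(new_grid, new_grid), mode="bicubic", align_corners=False)
resized = resized.permute(0, 2, 3, 1).reshape(1, new_grid * new_grid, embed_dim)
print(resized.shape)   # torch.Size([1, 576, 32])
# ---------------------------------------------------------------------------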
self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped) class IdeficsVisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class IdeficsVisionEncoderLayer(nn.Module): def __init__(self, config: IdeficsVisionConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = IdeficsVisionAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = IdeficsVisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = 
self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class IdeficsVisionEncoder(nn.Module): def __init__(self, config: IdeficsVisionConfig): super().__init__() self.config = config self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class IdeficsVisionTransformer(nn.Module): def __init__(self, config: IdeficsVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = IdeficsVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = IdeficsVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return 
(last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) # File: transformers-main/src/transformers/models/idefics/vision_tf.py """""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling from ...modeling_tf_utils import TFPreTrainedModel, shape_list from ...tf_utils import flatten from ...utils import ModelOutput, logging from .configuration_idefics import IdeficsVisionConfig logger = logging.get_logger(__name__) @dataclass class TFIdeficsVisionModelOutput(ModelOutput): image_embeds: Optional[tf.Tensor] = None last_hidden_state: tf.Tensor = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None class TFIdeficsVisionEmbeddings(tf.keras.layers.Layer): def __init__(self, config: IdeficsVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = tf.keras.layers.Conv2D(filters=self.embed_dim, kernel_size=self.patch_size, strides=self.patch_size, use_bias=False, padding='valid', data_format='channels_last', name='patch_embedding') self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = tf.keras.layers.Embedding(self.num_positions, self.embed_dim, name='position_embedding') def interpolate_pos_encoding(self, embeddings: tf.Tensor, height: int, width: int) -> tf.Tensor: num_patches = shape_list(embeddings)[1] - 1 pos_embed = self.position_embedding(self.position_ids) num_positions = shape_list(pos_embed)[1] - 1 if num_patches == num_positions and height == width: return pos_embed class_pos_embed = pos_embed[:, 0] patch_pos_embed = pos_embed[:, 1:] embed_dim = shape_list(embeddings)[-1] num_h_patches = height // self.config.patch_size num_w_patches = width // self.config.patch_size (num_h_patches, num_w_patches) = (num_h_patches + 0.1, num_w_patches + 0.1) sqrt_num_positions = math.sqrt(float(num_positions)) patch_pos_embed = tf.reshape(patch_pos_embed, (1, int(sqrt_num_positions), int(sqrt_num_positions), embed_dim)) scale_height = num_h_patches / sqrt_num_positions scale_width = num_w_patches / sqrt_num_positions original_height = tf.cast(tf.shape(patch_pos_embed)[1], tf.float32) original_width = tf.cast(tf.shape(patch_pos_embed)[2], tf.float32) new_height = tf.cast(original_height * scale_height, tf.int32) new_width = tf.cast(original_width * scale_width, tf.int32) patch_pos_embed = tf.image.resize(patch_pos_embed, size=[new_height, new_width], method=tf.image.ResizeMethod.BICUBIC) if int(num_h_patches) != shape_list(patch_pos_embed)[-3] or int(num_w_patches) != shape_list(patch_pos_embed)[-2]: raise ValueError(f"Number of patches for images ({(int(num_h_patches), int(num_w_patches))}) don't match the shape of position embedding ({(shape_list(patch_pos_embed)[-2], shape_list(patch_pos_embed)[-1])})") patch_pos_embed = tf.reshape(patch_pos_embed, (1, -1, embed_dim)) return tf.concat((class_pos_embed[tf.newaxis, :], patch_pos_embed), axis=1) def call(self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool=False) -> tf.Tensor: if isinstance(pixel_values, dict): 
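# ---------------------------------------------------------------------------
# Editor-added sketch of the patch embedding defined above and applied in
# call(): pixel values arrive channels-first (NCHW), are transposed to
# channels-last for tf.keras Conv2D, and a strided convolution turns each
# patch into one embedding vector. Dimensions below are illustrative:
import tensorflow as tf

patch_size, embed_dim = 14, 32
patch_embedding = tf.keras.layers.Conv2D(
    filters=embed_dim, kernel_size=patch_size, strides=patch_size,
    use_bias=False, padding="valid", data_format="channels_last")

pixel_values = tf.random.normal((2, 3, 224, 224))               # NCHW, as returned by the processor
pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))    # -> NHWC
patch_embeds = patch_embedding(pixel_values)                     # (2, 16, 16, 32)
patch_embeds = tf.reshape(patch_embeds, (2, -1, embed_dim))      # (2, 256, 32) patch tokens
print(patch_embeds.shape)
# ---------------------------------------------------------------------------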
pixel_values = pixel_values['pixel_values'] pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) (batch_size, height, width, num_channels) = shape_list(pixel_values) if not interpolate_pos_encoding: if height != self.image_size or width != self.image_size: raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size}). You should try to set `interpolate_pos_encoding=True`") patch_embeds = self.patch_embedding(pixel_values) patch_embeds = flatten(patch_embeds, 1, 2) class_embeds = tf.broadcast_to(self.class_embedding[tf.newaxis, tf.newaxis, :], [batch_size, 1, self.embed_dim]) embeddings = tf.concat([class_embeds, patch_embeds], axis=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings def build(self, input_shape=None): if self.built: return self.built = True self.position_ids = tf.range(self.num_positions, name='self.position_ids')[tf.newaxis, :] self.class_embedding = self.add_weight(shape=(self.embed_dim,), name='class_embedding') if getattr(self, 'patch_embedding', None) is not None: with tf.name_scope(self.patch_embedding.name): self.patch_embedding.build([None, None, None, self.config.num_channels]) if getattr(self, 'position_embedding', None) is not None: with tf.name_scope(self.position_embedding.name): self.position_embedding.build(None) class TFIdeficsVisionAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = tf.keras.layers.Dense(self.embed_dim, name='k_proj') self.v_proj = tf.keras.layers.Dense(self.embed_dim, name='v_proj') self.q_proj = tf.keras.layers.Dense(self.embed_dim, name='q_proj') self.out_proj = tf.keras.layers.Dense(self.embed_dim, name='out_proj') def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), perm=[0, 2, 1, 3]) def call(self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor]=None, causal_attention_mask: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[tf.Tensor, Optional[tf.Tensor], Optional[Tuple[tf.Tensor]]]: (bsz, tgt_len, embed_dim) = shape_list(hidden_states) query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.linalg.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal(tf.shape(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=f'Attention weights should be of size {[bsz * self.num_heads, tgt_len, src_len]}, but is {tf.shape(attn_weights)}') if causal_attention_mask is not None: if 
shape_list(causal_attention_mask) != [bsz, 1, tgt_len, src_len]: raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(causal_attention_mask)}') attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + causal_attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) if attention_mask is not None: if shape_list(attention_mask) != [bsz, 1, tgt_len, src_len]: raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}') attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = tf.nn.softmax(attn_weights, axis=-1) if output_attentions: attn_weights_reshaped = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) attn_weights = tf.reshape(attn_weights_reshaped, (bsz * self.num_heads, tgt_len, src_len)) else: attn_weights_reshaped = None attn_probs = tf.nn.dropout(attn_weights, rate=self.dropout) attn_output = tf.linalg.matmul(attn_probs, value_states) tf.debugging.assert_equal(tf.shape(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=f'Attention weights should be of size {[bsz * self.num_heads, tgt_len, self.head_dim]}, but is {tf.shape(attn_output)}') attn_output = tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)) attn_output = tf.transpose(attn_output, perm=[0, 2, 1, 3]) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'k_proj', None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build((self.embed_dim, self.embed_dim)) if getattr(self, 'v_proj', None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build((self.embed_dim, self.embed_dim)) if getattr(self, 'q_proj', None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build((self.embed_dim, self.embed_dim)) if getattr(self, 'out_proj', None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build((self.embed_dim, self.embed_dim)) class TFIdeficsVisionMLP(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.activation_fn = get_tf_activation(config.hidden_act) self.fc1 = tf.keras.layers.Dense(config.intermediate_size, name='fc1') self.fc2 = tf.keras.layers.Dense(config.hidden_size, name='fc2') def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'fc1', None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build(self.config.hidden_size) if getattr(self, 'fc2', None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build(self.config.intermediate_size) class TFIdeficsVisionEncoderLayer(tf.keras.layers.Layer): def __init__(self, config: IdeficsVisionConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.hidden_size self.self_attn = TFIdeficsVisionAttention(config, name='self_attn') self.layer_norm1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm1') self.mlp = 
TFIdeficsVisionMLP(config, name='mlp') self.layer_norm2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm2') def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: Optional[bool]=False) -> Tuple[tf.Tensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layer_norm1', None) is not None: with tf.name_scope(self.layer_norm1.name): self.layer_norm1.build([None, None, self.embed_dim]) if getattr(self, 'layer_norm2', None) is not None: with tf.name_scope(self.layer_norm2.name): self.layer_norm2.build([None, None, self.embed_dim]) class TFIdeficsVisionEncoder(tf.keras.layers.Layer): def __init__(self, config: IdeficsVisionConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layers = [TFIdeficsVisionEncoderLayer(config, name=f'layers.{i}') for i in range(config.num_hidden_layers)] self.gradient_checkpointing = False def call(self, inputs_embeds, attention_mask: Optional[tf.Tensor]=None, causal_attention_mask: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = tf.recompute_grad(create_custom_forward(encoder_layer), hidden_states, attention_mask, causal_attention_mask) else: layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'layers', None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFIdeficsVisionTransformer(TFPreTrainedModel): def __init__(self, config: IdeficsVisionConfig, 
**kwargs): super().__init__(config, **kwargs) self.config = config self.embed_dim = config.hidden_size self.embeddings = TFIdeficsVisionEmbeddings(config, name='embeddings') self.pre_layrnorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='pre_layrnorm') self.encoder = TFIdeficsVisionEncoder(config, name='encoder') self.post_layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='post_layernorm') def call(self, pixel_values: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[Tuple, TFBaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, 'embeddings', None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, 'pre_layrnorm', None) is not None: with tf.name_scope(self.pre_layrnorm.name): self.pre_layrnorm.build([None, None, self.embed_dim]) if getattr(self, 'encoder', None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, 'post_layernorm', None) is not None: with tf.name_scope(self.post_layernorm.name): self.post_layernorm.build([None, self.embed_dim]) # File: transformers-main/src/transformers/models/idefics2/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_idefics2': ['Idefics2Config']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_idefics2'] = ['Idefics2ImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_idefics2'] = ['Idefics2ForConditionalGeneration', 'Idefics2PreTrainedModel', 'Idefics2Model'] _import_structure['processing_idefics2'] = ['Idefics2Processor'] if TYPE_CHECKING: from .configuration_idefics2 import Idefics2Config try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_idefics2 import Idefics2ImageProcessor try: if not 
is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_idefics2 import Idefics2ForConditionalGeneration, Idefics2Model, Idefics2PreTrainedModel from .processing_idefics2 import Idefics2Processor else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) # File: transformers-main/src/transformers/models/idefics2/configuration_idefics2.py """""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class Idefics2VisionConfig(PretrainedConfig): model_type = 'idefics2' def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='gelu_pytorch_tanh', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=0.02, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'idefics2': config_dict = config_dict['vision_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class Idefics2PerceiverConfig(PretrainedConfig): model_type = 'idefics2' def __init__(self, hidden_act='silu', resampler_n_latents=64, resampler_depth=3, resampler_n_heads=16, resampler_head_dim=96, num_key_value_heads=4, attention_dropout=0.0, **kwargs): self.hidden_act = hidden_act self.resampler_n_latents = resampler_n_latents self.resampler_depth = resampler_depth self.resampler_n_heads = resampler_n_heads self.num_key_value_heads = num_key_value_heads self.resampler_head_dim = resampler_head_dim self.attention_dropout = attention_dropout if self.num_key_value_heads > self.resampler_n_heads: raise ValueError(f'num_key_value_heads={self.num_key_value_heads} must be less than or equal to resampler_n_heads={self.resampler_n_heads}') super().__init__(**kwargs) class Idefics2Config(PretrainedConfig): model_type = 'idefics2' is_composition = True def __init__(self, use_cache=True, image_token_id=32001, tie_word_embeddings=False, vision_config=None, perceiver_config=None, text_config=None, **kwargs): self.image_token_id = image_token_id self.use_cache = use_cache self.tie_word_embeddings = tie_word_embeddings if perceiver_config is None: self.perceiver_config = Idefics2PerceiverConfig() logger.info('perciver_config is None, using default perceiver config') elif isinstance(perceiver_config, dict): self.perceiver_config = Idefics2PerceiverConfig(**perceiver_config) elif isinstance(perceiver_config, Idefics2PerceiverConfig): self.perceiver_config = perceiver_config if vision_config is None: self.vision_config = Idefics2VisionConfig() logger.info('vision_config is None, using default vision config') elif isinstance(vision_config, dict): self.vision_config = Idefics2VisionConfig(**vision_config) elif isinstance(vision_config, Idefics2VisionConfig): self.vision_config = vision_config if isinstance(text_config, dict): text_config['model_type'] = text_config['model_type'] if 'model_type' in text_config else 'mistral' text_config = CONFIG_MAPPING[text_config['model_type']](**text_config) elif text_config is None: logger.info('text_config is None, using default text config') text_config = CONFIG_MAPPING['mistral'](max_position_embeddings=4096 * 8, rms_norm_eps=1e-05, pad_token_id=0, tie_word_embeddings=False) self.text_config = text_config super().__init__(**kwargs, tie_word_embeddings=tie_word_embeddings) # File: transformers-main/src/transformers/models/idefics2/convert_idefics2_weights_to_hf.py import argparse import copy import torch from accelerate import init_empty_weights from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, Idefics2Config, Idefics2ForConditionalGeneration, Idefics2ImageProcessor, Idefics2Processor, MistralConfig EPILOG_TXT = 'Example:\n python transformers/src/transformers/models/idefics2/convert_idefics2_weights_to_hf.py --original_model_id HuggingFaceM4/idefics2-8b --output_hub_path org/idefics2\n' KEYS_TO_MODIFY_MAPPING = {'lm_head.weight': 'lm_head.linear.weight', 'model.layers': 'model.text_model.layers', 'model.norm': 'model.text_model.norm', 'model.perceiver_resampler': 'model.connector.perceiver_resampler', 'model.modality_projection': 'model.connector.modality_projection'} WEIGHTS_TO_MERGE_MAPPING = ((('model.embed_tokens.weight', 'model.embed_tokens.additional_embedding.weight'), 'model.text_model.embed_tokens.weight'), (('lm_head.linear.weight', 'additional_fc.weight'), 'lm_head.weight')) def 
convert_state_dict_to_hf(state_dict): new_state_dict = {} for (key, value) in state_dict.items(): if key.endswith('.inv_freq'): continue for (key_to_modify, new_key) in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) new_state_dict[key] = value return new_state_dict def merge_weights(state_dict): new_state_dict = copy.deepcopy(state_dict) for (weights_to_merge, new_weight_name) in WEIGHTS_TO_MERGE_MAPPING: for weight in weights_to_merge: assert weight in state_dict, f'Weight {weight} is missing in the state dict' if new_weight_name not in new_state_dict: new_state_dict[new_weight_name] = [state_dict[weight]] else: new_state_dict[new_weight_name].append(state_dict[weight]) new_state_dict[new_weight_name] = torch.cat(new_state_dict[new_weight_name], dim=0) for (weights_to_merge, new_weight_name) in WEIGHTS_TO_MERGE_MAPPING: for weight in weights_to_merge: if weight in new_state_dict and weight != new_weight_name: new_state_dict.pop(weight) return new_state_dict def get_config(checkpoint): if checkpoint == 'HuggingFaceM4/idefics2': config = AutoConfig.from_pretrained(checkpoint) text_config = MistralConfig(vocab_size=config.vocab_size + config.additional_vocab_size, hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, num_hidden_layers=config.num_hidden_layers, num_attention_heads=config.num_attention_heads, num_key_value_heads=config.num_key_value_heads, hidden_act=config.hidden_act, max_position_embeddings=config.max_position_embeddings, initializer_range=config.initializer_range, rms_norm_eps=config.rms_norm_eps, tie_word_embeddings=config.tie_word_embeddings, rope_theta=config.rope_theta, sliding_window=config.sliding_window, attention_dropout=config.attention_dropout, pad_token_id=config.pad_token_id, bos_token_id=config.bos_token_id, eos_token_id=config.eos_token_id) perceiver_config = config.perceiver_config.to_dict() config = Idefics2Config(text_config=text_config.to_dict(), vision_config=config.vision_config, perceiver_config=perceiver_config, use_cache=config.use_cache, image_token_id=config.image_token_id, tie_word_embeddings=config.tie_word_embeddings) return config return AutoConfig.from_pretrained(checkpoint) def convert_idefics2_hub_to_hf(original_model_id, output_hub_path, push_to_hub): original_model = AutoModelForCausalLM.from_pretrained(original_model_id, trust_remote_code=True) image_seq_len = original_model.config.perceiver_config.resampler_n_latents image_processor = Idefics2ImageProcessor() tokenizer = AutoTokenizer.from_pretrained(original_model_id) processor = Idefics2Processor(image_processor=image_processor, tokenizer=tokenizer, image_seq_len=image_seq_len) state_dict = original_model.state_dict() state_dict = convert_state_dict_to_hf(state_dict) state_dict = merge_weights(state_dict) config = get_config(original_model_id) with init_empty_weights(): model = Idefics2ForConditionalGeneration(config) model.load_state_dict(state_dict, strict=True, assign=True) model.save_pretrained(output_hub_path) processor.save_pretrained(output_hub_path) if push_to_hub: model.push_to_hub(output_hub_path, private=True) processor.push_to_hub(output_hub_path, private=True) def main(): parser = argparse.ArgumentParser(epilog=EPILOG_TXT, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('--original_model_id', help='Hub location of the text model') parser.add_argument('--output_hub_path', help='Location on the hub of the converted model') parser.add_argument('--push_to_hub', 
action='store_true', help='If set, the model will be pushed to the hub after conversion.') args = parser.parse_args() convert_idefics2_hub_to_hf(args.original_model_id, args.output_hub_path, args.push_to_hub) if __name__ == '__main__': main() # File: transformers-main/src/transformers/models/idefics2/image_processing_idefics2.py from typing import Any, Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import PaddingMode, pad, resize, to_channel_dimension_format from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, is_valid_image, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): import PIL from PIL import Image def get_resize_output_image_size(image, size, input_data_format) -> Tuple[int, int]: (height, width) = get_image_size(image, channel_dim=input_data_format) min_len = size['shortest_edge'] max_len = size['longest_edge'] aspect_ratio = width / height if width >= height and width > max_len: width = max_len height = int(width / aspect_ratio) elif height > width and height > max_len: height = max_len width = int(height * aspect_ratio) height = max(height, min_len) width = max(width, min_len) return (height, width) def make_list_of_images(images: ImageInput) -> List[List[np.ndarray]]: if is_valid_image(images): images = [[images]] elif isinstance(images, (list, tuple)) and len(images) > 0 and is_valid_image(images[0]): images = [images] elif isinstance(images, (list, tuple)) and len(images) > 0 and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]): pass else: raise ValueError('Invalid input type. 
Must be a single image, a list of images, or a list of batches of images.') return images def max_across_indices(values: Iterable[Any]) -> List[Any]: return [max(values_i) for values_i in zip(*values)] def get_max_height_width(images_list: List[List[np.ndarray]], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> List[int]: if input_data_format is None: input_data_format = infer_channel_dimension_format(images_list[0][0]) image_sizes = [] for images in images_list: for image in images: image_sizes.append(get_image_size(image, channel_dim=input_data_format)) (max_height, max_width) = max_across_indices(image_sizes) return (max_height, max_width) def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: (input_height, input_width) = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask def convert_to_rgb(image: ImageInput) -> ImageInput: if not isinstance(image, PIL.Image.Image): return image if image.mode == 'RGB': return image image_rgba = image.convert('RGBA') background = Image.new('RGBA', image_rgba.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, image_rgba) alpha_composite = alpha_composite.convert('RGB') return alpha_composite class Idefics2ImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_convert_rgb: bool=True, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_pad: bool=True, do_image_splitting: bool=False, **kwargs) -> None: super().__init__(**kwargs) self.do_convert_rgb = do_convert_rgb self.do_resize = do_resize self.size = size if size is not None else {'shortest_edge': 378, 'longest_edge': 980} self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.do_pad = do_pad self.do_image_splitting = do_image_splitting def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: if 'shortest_edge' in size and 'longest_edge' in size: size = get_resize_output_image_size(image, size, input_data_format) elif 'height' in size and 'width' in size: size = (size['height'], size['width']) else: raise ValueError("size must be a dictionary with keys 'shortest_edge' and 'longest_edge' or 'height' and 'width'.") return resize(image, size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def _pad_image(self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: (input_height, input_width) = get_image_size(image, channel_dim=input_data_format) (output_height, output_width) = output_size pad_bottom = output_height - input_height pad_right = output_width - 
input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) return padded_image def pad(self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature: pad_size = get_max_height_width(images, input_data_format=input_data_format) batch_size = len(images) max_num_images = max((len(images_) for images_ in images)) input_data_format = infer_channel_dimension_format(images[0][0]) if input_data_format is None else input_data_format data_format = input_data_format if data_format is None else data_format def empty_image(size, input_data_format): if input_data_format == ChannelDimension.FIRST: return np.zeros((3, *size), dtype=np.uint8) elif input_data_format == ChannelDimension.LAST: return np.zeros((*size, 3), dtype=np.uint8) raise ValueError('Invalid channel dimension format.') padded_images_list = [[empty_image(pad_size, data_format) for _ in range(max_num_images)] for _ in range(batch_size)] padded_masks = [[np.zeros(pad_size) for _ in range(max_num_images)] for _ in range(batch_size)] for batch_idx in range(batch_size): for (sample_idx, image) in enumerate(images[batch_idx]): padded_images_list[batch_idx][sample_idx] = self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) padded_masks[batch_idx][sample_idx] = make_pixel_mask(image, output_size=pad_size, input_data_format=input_data_format) padded_masks = padded_masks if return_pixel_mask else None return (padded_images_list, padded_masks) def _crop(self, im: np.ndarray, w1: int, h1: int, w2: int, h2: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: if input_data_format == ChannelDimension.FIRST: return im[:, h1:h2, w1:w2] elif input_data_format == ChannelDimension.LAST: return im[h1:h2, w1:w2, :] def split_image(self, image: np.ndarray, input_data_format: Optional[Union[str, ChannelDimension]]=None): (height, width) = get_image_size(image, input_data_format) mid_width = width // 2 mid_height = height // 2 return [self._crop(image, 0, 0, mid_width, mid_height, input_data_format), self._crop(image, mid_width, 0, width, mid_height, input_data_format), self._crop(image, 0, mid_height, mid_width, height, input_data_format), self._crop(image, mid_width, mid_height, width, height, input_data_format), image] def preprocess(self, images: ImageInput, do_convert_rgb: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_pad: Optional[bool]=None, do_image_splitting: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, input_data_format: Optional[ChannelDimension]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST): do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else 
self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb do_pad = do_pad if do_pad is not None else self.do_pad do_image_splitting = do_image_splitting if do_image_splitting is not None else self.do_image_splitting images_list = make_list_of_images(images) if not valid_images(images_list[0]): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images_list = [[convert_to_rgb(image) for image in images] for images in images_list] images_list = [[to_numpy_array(image) for image in images] for images in images_list] if is_scaled_image(images_list[0][0]) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images_list[0][0]) if do_image_splitting: new_images_list = [] for images in images_list: new_images = [] for image in images: new_images.extend(self.split_image(image, input_data_format)) new_images_list.append(new_images) images_list = new_images_list if do_resize: images_list = [[self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images] for images in images_list] if do_rescale: images_list = [[self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] for images in images_list] if do_normalize: images_list = [[self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] for images in images_list] pixel_attention_mask = None if do_pad: (images_list, pixel_attention_mask) = self.pad(images_list, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=input_data_format) if data_format is not None: images_list = [[to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] for images in images_list] data = {'pixel_values': np.array(images_list) if do_pad else images_list} if pixel_attention_mask is not None: data['pixel_attention_mask'] = np.array(pixel_attention_mask) if do_pad else pixel_attention_mask return BatchFeature(data=data, tensor_type=return_tensors) # File: transformers-main/src/transformers/models/idefics2/modeling_idefics2.py """""" import math from dataclasses import dataclass from typing import Dict, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ... 
import PreTrainedModel from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, ModelOutput from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, is_torchdynamo_compiling, logging, replace_return_docstrings from ..auto import AutoModel from .configuration_idefics2 import Idefics2Config, Idefics2VisionConfig if is_flash_attn_2_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'Idefics2Config' @dataclass class Idefics2BaseModelOutputWithPast(ModelOutput): last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None @dataclass class Idefics2CausalLMOutputWithPast(ModelOutput): loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None class Idefics2VisionEmbeddings(nn.Module): def __init__(self, config: Idefics2VisionConfig): super().__init__() self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, padding='valid') self.num_patches_per_side = self.image_size // self.patch_size self.num_patches = self.num_patches_per_side ** 2 self.num_positions = self.num_patches self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor: (batch_size, _, max_im_h, max_im_w) = pixel_values.shape patch_embeds = self.patch_embedding(pixel_values) embeddings = patch_embeds.flatten(2).transpose(1, 2) (max_nb_patches_h, max_nb_patches_w) = (max_im_h // self.patch_size, max_im_w // self.patch_size) boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side) position_ids = torch.full(size=(batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0) for (batch_idx, p_attn_mask) in enumerate(patch_attention_mask): nb_patches_h = p_attn_mask[:, 0].sum() nb_patches_w = p_attn_mask[0].sum() fractional_coords_h = torch.arange(0, 1 - 1e-06, 1 / nb_patches_h) fractional_coords_w = torch.arange(0, 1 - 1e-06, 1 / nb_patches_w) bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True) bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True) pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten() position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids position_ids = position_ids.to(self.position_embedding.weight.device) embeddings = embeddings + self.position_embedding(position_ids) return embeddings class Idefics2VisionAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = 
self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) self.is_causal = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: (batch_size, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2) k_v_seq_len = key_states.shape[-2] attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len): raise ValueError(f'Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len): raise ValueError(f'Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}') attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights) class Idefics2VisionFlashAttention2(Idefics2VisionAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, **kwargs) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: output_attentions = False (bsz, q_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim) key_states = key_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value.get_usable_length(kv_seq_len, 
self.layer_idx) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.dropout if self.training else 0.0 input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_output = attn_output.reshape(bsz, q_len, self.embed_dim).contiguous() attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights) IDEFICS_VISION_ATTENTION_CLASSES = {'eager': Idefics2VisionAttention, 'flash_attention_2': Idefics2VisionFlashAttention2} class Idefics2VisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class Idefics2MLP(nn.Module): def __init__(self, hidden_size: int, intermediate_size: int, output_size: int, hidden_act: str): super().__init__() self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, output_size, bias=False) self.act_fn = ACT2FN[hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) class Idefics2MultiheadAttentionPoolingHead(nn.Module): def __init__(self, config: Idefics2VisionConfig): super().__init__() self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size)) self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = Idefics2MLP(hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, output_size=config.hidden_size) def forward(self, hidden_state): batch_size = hidden_state.shape[0] probe = self.probe.repeat(batch_size, 1, 1) hidden_state = self.attention(probe, hidden_state, hidden_state)[0] residual = hidden_state hidden_state = self.layernorm(hidden_state) hidden_state = residual + self.mlp(hidden_state) return hidden_state[:, 0] class Idefics2EncoderLayer(nn.Module): def __init__(self, config: Idefics2VisionConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = IDEFICS_VISION_ATTENTION_CLASSES[config._attn_implementation](config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = Idefics2VisionMLP(config) self.layer_norm2 = 
nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class Idefics2Encoder(nn.Module): def __init__(self, config: Idefics2Config): super().__init__() self.config = config self.layers = nn.ModuleList([Idefics2EncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for encoder_layer in self.layers: if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class Idefics2VisionTransformer(nn.Module): def __init__(self, config: Idefics2VisionConfig): super().__init__() embed_dim = config.hidden_size self.config = config self.embeddings = Idefics2VisionEmbeddings(config) self.encoder = Idefics2Encoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings = value def forward(self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size = pixel_values.size(0) if 
patch_attention_mask is None: patch_size = self.config.patch_size patch_attention_mask = torch.ones((batch_size, pixel_values.size(2) // patch_size, pixel_values.size(3) // patch_size)) patch_attention_mask = patch_attention_mask.to(dtype=torch.bool, device=pixel_values.device) hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask) patch_attention_mask = patch_attention_mask.view(batch_size, -1) if not torch.any(~patch_attention_mask): patch_attention_mask = None elif not self._use_flash_attention_2: patch_attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype) encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=patch_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) if not return_dict: return (last_hidden_state,) + encoder_outputs[1:] return BaseModelOutput(last_hidden_state=last_hidden_state, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: (batch, num_key_value_heads, slen, head_dim) = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class Idefics2RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}' class Idefics2PerceiverAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int]=None) -> None: super().__init__() self.layer_idx = None self.hidden_size = config.text_config.hidden_size self.num_heads = config.perceiver_config.resampler_n_heads self.head_dim = config.perceiver_config.resampler_head_dim self.num_key_value_heads = config.perceiver_config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.attention_dropout = config.perceiver_config.attention_dropout self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.is_causal = False def forward(self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: bool=False, use_cache: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, q_len, _) = latents.size() kv_seq_len = q_len + context.size()[1] hidden_states = torch.concat([context, latents], dim=-2) query_states = self.q_proj(latents) key_states = self.k_proj(hidden_states) 
value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) past_key_value = getattr(self, 'past_key_value', past_key_value) if past_key_value is not None: (key_states, value_states) = past_key_value.update(key_states, value_states, self.layer_idx) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError(f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}') attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) class Idefics2PerceiverFlashAttention2(Idefics2PerceiverAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() def forward(self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, **kwargs) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, q_len, _) = latents.size() kv_seq_len = q_len + context.size()[1] query_states = self.q_proj(latents) key_states = self.k_proj(torch.cat([context, latents], dim=-2)) value_states = self.v_proj(torch.cat([context, latents], dim=-2)) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim) key_states = key_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, kv_seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] if past_key_value is not None: if hasattr(self.config, 'sliding_window') and kv_seq_len > self.config.sliding_window: slicing_tokens = kv_seq_len - self.config.sliding_window past_key = past_key_value[0] past_value = past_key_value[1] past_key = past_key[:, :, slicing_tokens:, :].contiguous() past_value = past_value[:, :, slicing_tokens:, :].contiguous() if past_key.shape[-2] != self.config.sliding_window - 1: raise ValueError(f'past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got 
{past_key.shape}') past_key_value = (past_key, past_value) if attention_mask is not None: attention_mask = attention_mask[:, slicing_tokens:] attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) dropout_rate = 0.0 if not self.training else self.attention_dropout input_dtype = query_states.dtype if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() elif hasattr(self.config, '_pre_quantization_dtype'): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.') query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, sliding_window=None, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask) attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights, past_key_value) IDEFICS2_PERCEIVER_ATTENTION_CLASSES = {'eager': Idefics2PerceiverAttention, 'flash_attention_2': Idefics2PerceiverFlashAttention2} class Idefics2PerceiverLayer(nn.Module): def __init__(self, config, layer_idx: int): super().__init__() self.hidden_size = config.text_config.hidden_size self.n_latents = config.perceiver_config.resampler_n_latents self.depth = config.perceiver_config.resampler_depth self.rms_norm_eps = config.text_config.rms_norm_eps self.input_latents_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps) self.input_context_norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps) self.self_attn = IDEFICS2_PERCEIVER_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx) self.post_attention_layernorm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps) self.mlp = Idefics2MLP(hidden_size=config.text_config.hidden_size, intermediate_size=config.text_config.hidden_size * 4, output_size=config.text_config.hidden_size, hidden_act=config.perceiver_config.hidden_act) def forward(self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = latents latents = self.input_latents_norm(latents) context = self.input_context_norm(context) (latents, self_attn_weights, present_key_value) = self.self_attn(latents=latents, context=context, attention_mask=attention_mask) latents = residual + latents residual = latents latents = 
self.post_attention_layernorm(latents) latents = self.mlp(latents) latents = residual + latents outputs = (latents,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class Idefics2PerceiverResampler(nn.Module): def __init__(self, config) -> None: super().__init__() self.hidden_size = config.text_config.hidden_size self.hidden_act = config.perceiver_config.hidden_act self.n_latents = config.perceiver_config.resampler_n_latents self.depth = config.perceiver_config.resampler_depth self.rms_norm_eps = config.text_config.rms_norm_eps self.latents = nn.Parameter(torch.ones(self.n_latents, self.hidden_size)) self.layers = nn.ModuleList([Idefics2PerceiverLayer(config, idx) for idx in range(self.depth)]) self.norm = Idefics2RMSNorm(self.hidden_size, eps=self.rms_norm_eps) self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' def forward(self, context: torch.Tensor, attention_mask) -> torch.Tensor: latents = self.latents.unsqueeze(0).expand((context.shape[0], *self.latents.size())) latent_attention_mask = torch.ones((attention_mask.size(0), latents.size(1)), dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat([attention_mask, latent_attention_mask], dim=-1) attention_mask = _prepare_4d_attention_mask(attention_mask, latents.dtype, tgt_len=self.n_latents) if not self._use_flash_attention_2 else attention_mask compressed_context = latents for perceiver_layer in self.layers: layer_outputs = perceiver_layer(compressed_context, context, attention_mask=attention_mask, position_ids=None, past_key_value=None, output_attentions=False, use_cache=False) compressed_context = layer_outputs[0] compressed_context = self.norm(compressed_context) return compressed_context class Idefics2Connector(nn.Module): def __init__(self, config): super().__init__() self.modality_projection = Idefics2MLP(hidden_size=config.vision_config.hidden_size, intermediate_size=config.text_config.intermediate_size, output_size=config.text_config.hidden_size, hidden_act=config.text_config.hidden_act) self.perceiver_resampler = Idefics2PerceiverResampler(config) def forward(self, image_hidden_states, attention_mask): image_hidden_states = self.modality_projection(image_hidden_states) image_hidden_states = self.perceiver_resampler(context=image_hidden_states, attention_mask=attention_mask) return image_hidden_states IDEFICS2_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`Idefics2Config`] or [`Idefics2VisionConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. 
Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' @add_start_docstrings('The bare Idefics2 Model outputting raw hidden-states without any specific head on top.', IDEFICS2_START_DOCSTRING) class Idefics2PreTrainedModel(PreTrainedModel): config_class = Idefics2Config base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['Idefics2VisionAttention', 'Idefics2MLP', 'Idefics2PerceiverLayer', 'Idefics2DecoderLayer'] _skip_keys_device_placement = 'past_key_values' _supports_flash_attn_2 = True _supports_cache_class = True def _init_weights(self, module): std = self.config.text_config.initializer_range if hasattr(self.config, 'initializer_range') else self.config.text_config.initializer_range if hasattr(module, 'class_embedding'): module.class_embedding.data.normal_(mean=0.0, std=std) if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @classmethod def _autoset_attn_implementation(cls, config, use_flash_attention_2: bool=False, torch_dtype: Optional[torch.dtype]=None, device_map: Optional[Union[str, Dict[str, int]]]=None, check_device_map: bool=True, **kwargs): config = super()._autoset_attn_implementation(config=config, use_flash_attention_2=use_flash_attention_2, torch_dtype=torch_dtype, device_map=device_map, check_device_map=check_device_map, **kwargs) config.vision_config._attn_implementation = config._attn_implementation return config IDEFICS2_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`. 
[What are position IDs?](../glossary#position-ids)\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):\n The tensors corresponding to the input images. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details ([]`LlavaProcessor`] uses\n [`CLIPImageProcessor`] for processing images).\n pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):\n Mask to avoid performing attention on padding pixel indices.\n image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\n The hidden states of the image encoder after modality projection and perceiver resampling.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('Idefics2 model consisting of a SIGLIP vision encoder and Mistral language decoder', IDEFICS2_START_DOCSTRING) class Idefics2Model(Idefics2PreTrainedModel): def __init__(self, config: Idefics2Config): super().__init__(config) self.padding_idx = self.config.text_config.pad_token_id self.vocab_size = self.config.text_config.vocab_size self.vision_model = Idefics2VisionTransformer(config.vision_config) self.connector = Idefics2Connector(config) self.text_model = AutoModel.from_config(config.text_config, attn_implementation=config._attn_implementation) self.image_seq_len = config.perceiver_config.resampler_n_latents self.image_token_id = self.config.image_token_id self._use_flash_attention_2 = config._attn_implementation == 'flash_attention_2' self.post_init() def enable_input_require_grads(self): def get_lowest_module(module): if len(list(module.children())) == 0: return module else: return get_lowest_module(list(module.children())[0]) def make_inputs_require_grads(module, input, output): output.requires_grad_(True) self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) self._vision_require_grads_hook = get_lowest_module(self.vision_model).register_forward_hook(make_inputs_require_grads) def get_input_embeddings(self): return self.text_model.get_input_embeddings() def set_input_embeddings(self, value): self.text_model.set_input_embeddings(value) def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of=None) -> nn.Embedding: model_embeds = self.text_model.resize_token_embeddings(new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of) self.config.text_config.vocab_size = model_embeds.num_embeddings return model_embeds def inputs_merger(self, input_ids: torch.LongTensor, inputs_embeds: Optional[torch.Tensor], image_hidden_states: Optional[torch.Tensor]): (num_images, _, vision_hidden_size) = image_hidden_states.shape special_image_token_mask = input_ids == self.image_token_id new_inputs_embeds = inputs_embeds.clone() reshaped_image_hidden_states = image_hidden_states.view(-1, vision_hidden_size) new_inputs_embeds[special_image_token_mask] = reshaped_image_hidden_states return new_inputs_embeds @add_start_docstrings_to_model_forward("\n Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to\n the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where\n max_num_images is the maximum number of images among the batch_size samples in the batch.\n\n Padding images are not needed beyond padding the pixel_values at the entrance of the model.\n For efficiency, we only pass through the vision_model's forward the real images by\n discarding the padding images i.e. 
pixel_values of size (image_batch_size, 3, height, width) where\n image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.\n ", IDEFICS2_INPUTS_DOCSTRING) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Idefics2BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.training and self.text_model.gradient_checkpointing and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False if input_ids is not None: (batch_size, seq_length) = input_ids.shape elif inputs_embeds is not None: (batch_size, seq_length, _) = inputs_embeds.shape else: raise ValueError('You have to specify either input_ids or inputs_embeds') past_seen_tokens = 0 return_legacy_cache = False if use_cache: if not isinstance(past_key_values, Cache): return_legacy_cache = True past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_seen_tokens = past_key_values.get_seq_length() if inputs_embeds is not None and input_ids is None and (past_seen_tokens == 0): raise ValueError('When first calling the model, if input_embeds are passed, input_ids should not be None.') if inputs_embeds is None: inputs_embeds = self.text_model.get_input_embeddings()(input_ids) if pixel_values is not None and image_hidden_states is not None: raise ValueError('You cannot specify both pixel_values and image_hidden_states at the same time') elif pixel_values is not None: (batch_size, num_images, num_channels, height, width) = pixel_values.shape pixel_values = pixel_values.to(dtype=self.dtype) pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:]) nb_values_per_image = pixel_values.shape[1:].numel() real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image pixel_values = pixel_values[real_images_inds].contiguous() if pixel_attention_mask is None: pixel_attention_mask = torch.ones(size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)), dtype=torch.bool, device=pixel_values.device) else: pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:]) pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous() patch_size = self.config.vision_config.patch_size patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size) patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size) patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool() image_hidden_states = self.vision_model(pixel_values=pixel_values, 
patch_attention_mask=patch_attention_mask).last_hidden_state image_hidden_states = self.connector(image_hidden_states, attention_mask=patch_attention_mask.view(pixel_values.size(0), -1)) elif image_hidden_states is not None: image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device) if past_seen_tokens == 0 and inputs_embeds is not None and (image_hidden_states is not None): inputs_embeds = self.inputs_merger(input_ids=input_ids, inputs_embeds=inputs_embeds, image_hidden_states=image_hidden_states) outputs = self.text_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if return_legacy_cache and use_cache: outputs.past_key_values = outputs.past_key_values.to_legacy_cache() if not return_dict: return tuple((v for v in [*outputs, image_hidden_states] if v is not None)) return Idefics2BaseModelOutputWithPast(last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_hidden_states) @add_start_docstrings('The Idefics2 Model with a language modeling head. It is made up a SigLIP vision encoder, with a language modeling head on top. ', IDEFICS2_START_DOCSTRING) class Idefics2ForConditionalGeneration(Idefics2PreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.model = Idefics2Model(config) self.image_token_id = self.config.image_token_id self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.vocab_size = config.text_config.vocab_size self.post_init() def enable_input_require_grads(self): def make_inputs_require_grads(module, input, output): output.requires_grad_(True) self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads) self._vision_require_grads_hook = self.model.vision_model.get_input_embeddings().register_forward_hook(make_inputs_require_grads) def get_input_embeddings(self): return self.model.text_model.get_input_embeddings() def set_input_embeddings(self, value): self.model.text_model.set_input_embeddings(value) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of=None) -> nn.Embedding: model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of) if new_num_tokens is None and pad_to_multiple_of is None: return model_embeds self.config.text_config.vocab_size = model_embeds.weight.shape[0] self.vocab_size = self.config.text_config.vocab_size self.tie_weights() return model_embeds def tie_weights(self): output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() if getattr(self.config, 'tie_word_embeddings', True): output_embeddings.weight = input_embeddings.weight @add_start_docstrings_to_model_forward(IDEFICS2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Idefics2CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: 
Optional[torch.FloatTensor]=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, num_logits_to_keep: int=0) -> Union[Tuple, Idefics2CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_hidden_states=image_hidden_states, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = outputs[0] if labels is None and (not is_torchdynamo_compiling()): logger.warning_once('Starting from v4.46, the `logits` model output will have the same type as the model (except at train time, where it will always be FP32)') logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :]).float() loss = None if labels is not None: logits = logits.float() labels = labels.to(logits.device) if attention_mask is not None: shift_attention_mask = attention_mask[..., 1:].to(logits.device) shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous() shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous() else: shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Idefics2CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, num_logits_to_keep=None, **kwargs): past_length = 0 if past_key_values is not None: past_length = past_key_values.get_seq_length() max_cache_length = past_key_values.get_max_length() if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: input_ids = input_ids[:, -(attention_mask.shape[1] - past_length):] elif past_length < input_ids.shape[1]: input_ids = input_ids[:, past_length:] if max_cache_length is not None and attention_mask is not None and (past_length + input_ids.shape[1] > max_cache_length): attention_mask = attention_mask[:, -max_cache_length:] position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] if inputs_embeds is not None and past_length == 0: model_inputs = {'inputs_embeds': inputs_embeds} else: model_inputs = {'input_ids': input_ids} if num_logits_to_keep is not None: model_inputs['num_logits_to_keep'] = num_logits_to_keep image_hidden_states = 
kwargs.get('image_hidden_states', None) if image_hidden_states is not None: pixel_values = None pixel_attention_mask = None else: pixel_values = kwargs.get('pixel_values', None) pixel_attention_mask = kwargs.get('pixel_attention_mask', None) model_inputs.update({'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'attention_mask': attention_mask, 'pixel_values': pixel_values, 'pixel_attention_mask': pixel_attention_mask, 'image_hidden_states': image_hidden_states}) return model_inputs def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder, **kwargs): model_kwargs = super()._update_model_kwargs_for_generation(outputs=outputs, model_kwargs=model_kwargs, is_encoder_decoder=is_encoder_decoder, **kwargs) model_kwargs['image_hidden_states'] = outputs.image_hidden_states return model_kwargs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)),) return reordered_past # File: transformers-main/src/transformers/models/idefics2/processing_idefics2.py """""" from typing import TYPE_CHECKING, List, Optional, Union from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput, is_valid_image, load_image from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import AddedToken, BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy from ...utils import TensorType, logging if TYPE_CHECKING: from ...tokenization_utils_base import PreTokenizedInput logger = logging.get_logger(__name__) def is_url(val) -> bool: return isinstance(val, str) and val.startswith('http') def is_image_or_image_url(elem): return is_url(elem) or is_valid_image(elem) class Idefics2Processor(ProcessorMixin): attributes = ['image_processor', 'tokenizer'] valid_kwargs = ['image_seq_len', 'chat_template'] image_processor_class = 'Idefics2ImageProcessor' tokenizer_class = 'AutoTokenizer' def __init__(self, image_processor, tokenizer=None, image_seq_len: int=64, chat_template: str=None, **kwargs): if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') self.fake_image_token = AddedToken('<fake_token_around_image>', normalized=False, special=True) self.image_token = AddedToken('<image>', normalized=False, special=True) self.end_of_utterance_token = AddedToken('<end_of_utterance>', normalized=False, special=True) self.image_seq_len = image_seq_len tokens_to_add = {'additional_special_tokens': [self.fake_image_token, self.image_token, self.end_of_utterance_token]} tokenizer.add_special_tokens(tokens_to_add) super().__init__(image_processor, tokenizer, chat_template=chat_template) def _extract_images_from_prompts(self, prompts): prompt_images = [] for prompt in prompts: images = [] for elem in prompt: if is_valid_image(elem): images.append(elem) elif is_url(elem): images.append(load_image(elem)) prompt_images.append(images) return prompt_images def __call__(self, text: Union[TextInput, 'PreTokenizedInput', List[TextInput], List['PreTokenizedInput']]=None, images: Union[ImageInput, List[ImageInput], List[List[ImageInput]]]=None, image_seq_len: Optional[int]=None, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, is_split_into_words: bool=False, add_special_tokens: bool=True,
return_tensors: Optional[Union[str, TensorType]]=None) -> BatchEncoding: image_seq_len = image_seq_len if image_seq_len is not None else self.image_seq_len n_images_in_text = [] inputs = BatchFeature() if text is not None: if isinstance(text, str): text = [text] elif not isinstance(text, list) and (not isinstance(text[0], str)): raise ValueError('Invalid input text. Please provide a string, or a list of strings') fake_image_token = self.fake_image_token.content image_token = self.image_token.content image_str = f'{fake_image_token}{image_token * image_seq_len}{fake_image_token}' if self.image_processor.do_image_splitting: image_str = image_str * 5 prompt_strings = [] for sample in text: n_images_in_text.append(sample.count(image_token)) sample = sample.replace(image_token, image_str) sample = sample.replace(f'{fake_image_token}{fake_image_token}', f'{fake_image_token}') prompt_strings.append(sample) text_inputs = self.tokenizer(text=prompt_strings, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, is_split_into_words=is_split_into_words, return_tensors=return_tensors) inputs.update(text_inputs) if images is not None: if is_image_or_image_url(images): images = [[images]] elif isinstance(images, list) and is_image_or_image_url(images[0]): images = [images] elif not isinstance(images, list) and (not isinstance(images[0], list)) and (not is_image_or_image_url(images[0][0])): raise ValueError('Invalid input images. Please provide a single image or a list of images or a list of list of images.') n_images_in_images = [len(sample) for sample in images] if text is not None and (not n_images_in_images == n_images_in_text): raise ValueError(f'The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same.') images = [[load_image(im) for im in sample] for sample in images] image_inputs = self.image_processor(images, return_tensors=return_tensors) inputs.update(image_inputs) return inputs def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) # File: transformers-main/src/transformers/models/imagegpt/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_imagegpt': ['ImageGPTConfig', 'ImageGPTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['feature_extraction_imagegpt'] = ['ImageGPTFeatureExtractor'] _import_structure['image_processing_imagegpt'] = ['ImageGPTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_imagegpt'] = ['ImageGPTForCausalImageModeling', 'ImageGPTForImageClassification', 'ImageGPTModel', 'ImageGPTPreTrainedModel', 'load_tf_weights_in_imagegpt'] if TYPE_CHECKING: from .configuration_imagegpt import ImageGPTConfig, ImageGPTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_imagegpt 
import ImageGPTFeatureExtractor from .image_processing_imagegpt import ImageGPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_imagegpt import ImageGPTForCausalImageModeling, ImageGPTForImageClassification, ImageGPTModel, ImageGPTPreTrainedModel, load_tf_weights_in_imagegpt else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/imagegpt/configuration_imagegpt.py """""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType logger = logging.get_logger(__name__) class ImageGPTConfig(PretrainedConfig): model_type = 'imagegpt' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'} def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function='quick_gelu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs): self.vocab_size = vocab_size self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.n_inner = n_inner self.activation_function = activation_function self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.scale_attn_weights = scale_attn_weights self.use_cache = use_cache self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx self.reorder_and_upcast_attn = reorder_and_upcast_attn self.tie_word_embeddings = tie_word_embeddings super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) class ImageGPTOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'})]) def generate_dummy_inputs(self, preprocessor: 'FeatureExtractionMixin', batch_size: int=1, seq_length: int=-1, is_pair: bool=False, framework: Optional['TensorType']=None, num_channels: int=3, image_width: int=32, image_height: int=32) -> Mapping[str, Any]: input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) inputs = dict(preprocessor(images=input_image, return_tensors=framework)) return inputs # File: transformers-main/src/transformers/models/imagegpt/convert_imagegpt_original_tf2_to_pytorch.py """""" import argparse import torch from transformers import ImageGPTConfig, ImageGPTForCausalLM, load_tf_weights_in_imagegpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def convert_imagegpt_checkpoint_to_pytorch(imagegpt_checkpoint_path, model_size, pytorch_dump_folder_path): MODELS = {'small': (512, 8, 24), 'medium': (1024, 8, 36), 'large': (1536, 16, 48)} (n_embd, n_head, n_layer) = MODELS[model_size] config = ImageGPTConfig(n_embd=n_embd, n_layer=n_layer, n_head=n_head) model = ImageGPTForCausalLM(config) 
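# Illustrative sketch (hedged aside, not part of the conversion script): the preset tuples in the
# `MODELS` dict above are (n_embd, n_head, n_layer); every other field falls back to the
# `ImageGPTConfig` defaults shown in configuration_imagegpt.py earlier, i.e. vocab_size=512 + 1
# (512 color clusters plus the extra `sos` row that `load_tf_weights_in_imagegpt` populates) and
# n_positions=32 * 32 (one token per pixel of a 32x32 image). The helper name
# `sketch_imagegpt_config` is invented for illustration only and assumes the `ImageGPTConfig`
# import at the top of this script.
def sketch_imagegpt_config(model_size: str) -> ImageGPTConfig:
    presets = {'small': (512, 8, 24), 'medium': (1024, 8, 36), 'large': (1536, 16, 48)}
    n_embd, n_head, n_layer = presets[model_size]
    # `ImageGPTConfig.attribute_map` exposes these as hidden_size, num_attention_heads and
    # num_hidden_layers respectively.
    return ImageGPTConfig(n_embd=n_embd, n_head=n_head, n_layer=n_layer)
# e.g. sketch_imagegpt_config('small').hidden_size == 512 and its max_position_embeddings == 1024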
load_tf_weights_in_imagegpt(model, config, imagegpt_checkpoint_path) pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}') torch.save(model.state_dict(), pytorch_weights_dump_path) print(f'Save configuration file to {pytorch_config_dump_path}') with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f: f.write(config.to_json_string()) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--imagegpt_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.') parser.add_argument('--model_size', default=None, type=str, required=True, help="Size of the model (can be either 'small', 'medium' or 'large').") parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.') args = parser.parse_args() convert_imagegpt_checkpoint_to_pytorch(args.imagegpt_checkpoint_path, args.model_size, args.pytorch_dump_folder_path) # File: transformers-main/src/transformers/models/imagegpt/feature_extraction_imagegpt.py """""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor logger = logging.get_logger(__name__) class ImageGPTFeatureExtractor(ImageGPTImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use ImageGPTImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs) # File: transformers-main/src/transformers/models/imagegpt/image_processing_imagegpt.py """""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) def squared_euclidean_distance(a, b): b = b.T a2 = np.sum(np.square(a), axis=1) b2 = np.sum(np.square(b), axis=0) ab = np.matmul(a, b) d = a2[:, None] - 2 * ab + b2[None, :] return d def color_quantize(x, clusters): x = x.reshape(-1, 3) d = squared_euclidean_distance(x, clusters) return np.argmin(d, axis=1) class ImageGPTImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, clusters: Optional[Union[List[List[int]], np.ndarray]]=None, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_normalize: bool=True, do_color_quantize: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 256, 'width': 256} size = get_size_dict(size) self.clusters = np.array(clusters) if clusters is not None else None self.do_resize = do_resize self.size = size self.resample = resample self.do_normalize = do_normalize self.do_color_quantize = do_color_quantize def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: 
Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size) if 'height' not in size or 'width' not in size: raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}') output_size = (size['height'], size['width']) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format) image = image - 1 return image @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[str, int]=None, resample: PILImageResampling=None, do_normalize: bool=None, do_color_quantize: Optional[bool]=None, clusters: Optional[Union[List[List[int]], np.ndarray]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[Union[str, ChannelDimension]]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) resample = resample if resample is not None else self.resample do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize clusters = clusters if clusters is not None else self.clusters clusters = np.array(clusters) images = make_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') validate_preprocess_arguments(do_resize=do_resize, size=size, resample=resample) if do_color_quantize and clusters is None: raise ValueError('Clusters must be specified if do_color_quantize is True.') images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_normalize: logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If you wish to do this, make sure to set `do_normalize` to `False` and that pixel values are between [-1, 1].') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, input_data_format=input_data_format) for image in images] if do_color_quantize: images = [to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) for image in images] images = np.array(images) images = color_quantize(images, clusters).reshape(images.shape[:-1]) batch_size = images.shape[0] images = images.reshape(batch_size, -1) images = list(images) else: images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] data = {'input_ids': images} return BatchFeature(data=data, tensor_type=return_tensors) # File: transformers-main/src/transformers/models/imagegpt/modeling_imagegpt.py """""" import math import os import warnings from typing import Any, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.cuda.amp import autocast from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast from ...modeling_utils import PreTrainedModel from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_float from .configuration_imagegpt import ImageGPTConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'openai/imagegpt-small' _CONFIG_FOR_DOC = 'ImageGPTConfig' def load_tf_weights_in_imagegpt(model, config, imagegpt_checkpoint_path): try: import re import tensorflow as tf except ImportError: logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see https://www.tensorflow.org/install/ for installation instructions.') raise tf_path = os.path.abspath(imagegpt_checkpoint_path) logger.info('Converting TensorFlow checkpoint from {}'.format(tf_path)) init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for (name, shape) in init_vars: logger.info('Loading TF weight {} with shape {}'.format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array.squeeze()) for (name, array) in zip(names, arrays): name = name[6:] name = name.split('/') if any((n in ['adam_v', 'adam_m', 'AdamWeightDecayOptimizer', 'AdamWeightDecayOptimizer_1', 'global_step'] for n in name)) or name[-1] in ['_step']: logger.info('Skipping {}'.format('/'.join(name))) continue pointer = model if name[-1] not in ['wtet']: pointer = getattr(pointer, 'transformer') for m_name in name: if re.fullmatch('[A-Za-z]+\\d+', m_name): scope_names = re.split('(\\d+)', m_name) else: scope_names = [m_name] if scope_names[0] == 'w' or scope_names[0] == 'g': pointer = getattr(pointer, 'weight') elif scope_names[0] == 'b': pointer = getattr(pointer, 'bias') elif scope_names[0] == 'wpe' or scope_names[0] == 'wte': pointer = getattr(pointer, scope_names[0]) pointer = getattr(pointer, 'weight') elif scope_names[0] in ['q_proj', 'k_proj', 'v_proj']: pointer = getattr(pointer, 'c_attn') pointer = getattr(pointer, 'weight') elif len(name) == 3 and name[1] == 'attn' and (scope_names[0] == 'c_proj'): pointer = getattr(pointer, scope_names[0]) pointer = getattr(pointer, 'weight') elif scope_names[0] == 'wtet': pointer = getattr(pointer, 'lm_head') pointer = getattr(pointer, 'weight') elif scope_names[0] == 'sos': pointer = getattr(pointer, 'wte') pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, scope_names[0]) if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if len(name) > 1 and name[1] == 'attn' or name[-1] == 'wtet' or name[-1] == 'sos' or (name[-1] == 'wte'): pass else: try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info('Initialize PyTorch weight {}'.format(name)) if name[-1] == 'q_proj': pointer.data[:, :config.n_embd] = torch.from_numpy(array.reshape(config.n_embd, config.n_embd)).T elif name[-1] == 'k_proj': pointer.data[:, config.n_embd:2 * config.n_embd] = torch.from_numpy(array.reshape(config.n_embd, config.n_embd)).T elif name[-1] == 'v_proj': pointer.data[:, 2 * config.n_embd:] = torch.from_numpy(array.reshape(config.n_embd, config.n_embd)).T elif len(name) == 3 and name[1] == 'attn' and (name[2] == 'c_proj'): pointer.data = torch.from_numpy(array.reshape(config.n_embd, config.n_embd)) elif name[-1] == 'wtet': pointer.data = torch.from_numpy(array) elif name[-1] == 'wte': pointer.data[:config.vocab_size - 1, :] = torch.from_numpy(array) elif name[-1] == 'sos': pointer.data[-1] = torch.from_numpy(array) else: pointer.data = torch.from_numpy(array) return model class ImageGPTLayerNorm(nn.Module): def __init__(self, hidden_size: Tuple[int], eps: float=1e-05): super().__init__() self.eps = eps self.weight = nn.Parameter(torch.Tensor(hidden_size)) def forward(self, tensor: torch.Tensor) -> tuple: return tensor / torch.sqrt(torch.mean(torch.square(tensor), axis=-1, keepdim=True) + self.eps) * self.weight.data[..., :] class ImageGPTAttention(nn.Module): def __init__(self, config, is_cross_attention: Optional[bool]=False, layer_idx: Optional[int]=None): super().__init__() max_positions = 
config.max_position_embeddings self.register_buffer('bias', torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(1, 1, max_positions, max_positions), persistent=False) self.register_buffer('masked_bias', torch.tensor(-10000.0), persistent=False) self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.split_size = self.embed_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale_attn_weights = config.scale_attn_weights self.is_cross_attention = is_cross_attention self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx self.layer_idx = layer_idx self.reorder_and_upcast_attn = config.reorder_and_upcast_attn if self.is_cross_attention: self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) self.q_attn = Conv1D(self.embed_dim, self.embed_dim) else: self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) self.c_proj = Conv1D(self.embed_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads) index_attn = torch.cat([index, index + self.split_size, index + 2 * self.split_size]) self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) self.split_size = self.split_size // self.num_heads * (self.num_heads - len(heads)) self.num_heads = self.num_heads - len(heads) self.pruned_heads = self.pruned_heads.union(heads) def _attn(self, query, key, value, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch_float(value.size(-1) ** 0.5) if self.scale_attn_by_inverse_layer_idx: attn_weights = attn_weights / float(self.layer_idx + 1) if not self.is_cross_attention: (query_length, key_length) = (query.size(-2), key.size(-2)) causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device) attn_weights = torch.where(causal_mask, attn_weights, mask_value) if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.Softmax(dim=-1)(attn_weights) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return (attn_output, attn_weights) def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None): (bsz, num_heads, q_seq_len, dk) = query.size() (_, _, k_seq_len, _) = key.size() attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device) scale_factor = 1.0 if self.scale_attn_weights: scale_factor /= float(value.size(-1)) ** 0.5 if self.scale_attn_by_inverse_layer_idx: scale_factor /= float(self.layer_idx + 1) with autocast(enabled=False): (q, k) = (query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)) attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, 
alpha=scale_factor) attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) if not self.is_cross_attention: (query_length, key_length) = (query.size(-2), key.size(-2)) causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length] mask_value = torch.finfo(attn_weights.dtype).min mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device) attn_weights = torch.where(causal_mask, attn_weights, mask_value) if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.Softmax(dim=-1)(attn_weights) if attn_weights.dtype != torch.float32: raise RuntimeError('Error with upcasting, attn_weights does not have dtype torch.float32') attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return (attn_output, attn_weights) def _split_heads(self, tensor, num_heads, attn_head_size): new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(*new_shape) return tensor.permute(0, 2, 1, 3) def _merge_heads(self, tensor, num_heads, attn_head_size): tensor = tensor.permute(0, 2, 1, 3).contiguous() new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) return tensor.view(new_shape) def forward(self, hidden_states: torch.Tensor, layer_past: Optional[bool]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> tuple: if encoder_hidden_states is not None: if not hasattr(self, 'q_attn'): raise ValueError('If class is used as cross attention, the weights `q_attn` have to be defined. 
Please make sure to instantiate class with `ImageGPTAttention(..., is_cross_attention=True)`.') query = self.q_attn(hidden_states) (key, value) = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) attention_mask = encoder_attention_mask else: (query, key, value) = self.c_attn(hidden_states).split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if layer_past is not None: (past_key, past_value) = layer_past key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: present = (key, value) else: present = None if self.reorder_and_upcast_attn: (attn_output, attn_weights) = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask) else: (attn_output, attn_weights) = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs class ImageGPTMLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = Conv1D(intermediate_size, embed_dim) self.c_proj = Conv1D(embed_dim, intermediate_size) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class ImageGPTBlock(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() hidden_size = config.hidden_size inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = ImageGPTAttention(config, layer_idx=layer_idx) self.ln_2 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: self.crossattention = ImageGPTAttention(config, is_cross_attention=True, layer_idx=layer_idx) self.ln_cross_attn = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = ImageGPTMLP(inner_dim, config) def forward(self, hidden_states: torch.Tensor, layer_past: Optional[bool]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> tuple: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions) attn_output = attn_outputs[0] outputs = attn_outputs[1:] hidden_states = attn_output + residual if encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_outputs = self.crossattention(hidden_states, 
attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions) attn_output = cross_attn_outputs[0] hidden_states = residual + attn_output outputs = outputs + cross_attn_outputs[2:] residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = residual + feed_forward_hidden_states outputs = (hidden_states,) + (outputs if use_cache else outputs[1:]) return outputs class ImageGPTPreTrainedModel(PreTrainedModel): config_class = ImageGPTConfig load_tf_weights = load_tf_weights_in_imagegpt base_model_prefix = 'transformer' main_input_name = 'input_ids' supports_gradient_checkpointing = True _no_split_modules = ['ImageGPTBlock'] def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): if isinstance(module, (nn.Linear, Conv1D)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, ImageGPTLayerNorm): module.weight.data.fill_(1.0) for (name, p) in module.named_parameters(): if 'c_proj' in name and 'weight' in name: p.data.normal_(mean=0.0, std=self.config.initializer_range / math.sqrt(2 * self.config.n_layer)) IMAGEGPT_START_DOCSTRING = '\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`ImageGPTConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' IMAGEGPT_INPUTS_DOCSTRING = "\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n `input_ids_length` = `sequence_length` if `past_key_values` is `None` else\n `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input\n sequence tokens in the vocabulary.\n\n If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.\n\n past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):\n Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have\n their past given to this model should not be passed as `input_ids` as they have already been computed.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see\n `past_key_values`).\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
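# --- Editor's illustrative usage sketch (not part of the original file) ---
# A hedged, minimal example of feeding `input_ids` to ImageGPTForCausalImageModeling
# (defined further down in this file). The tiny config values are arbitrary assumptions so
# the snippet runs quickly on CPU; real checkpoints such as "openai/imagegpt-small" use
# much larger settings and expect colour-cluster ids produced by ImageGPTImageProcessor.
import torch
from transformers import ImageGPTConfig, ImageGPTForCausalImageModeling

config = ImageGPTConfig(vocab_size=16, n_positions=32, n_embd=32, n_layer=2, n_head=2)
model = ImageGPTForCausalImageModeling(config)

# Random ids standing in for processor output; labels reuse the inputs, and the model
# shifts them internally for the next-token loss.
input_ids = torch.randint(0, config.vocab_size - 1, (1, 16))
outputs = model(input_ids=input_ids, labels=input_ids)
print(outputs.loss, outputs.logits.shape)  # scalar loss, torch.Size([1, 16, 15]) i.e. vocab_size - 1 classes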
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings('The bare ImageGPT Model transformer outputting raw hidden-states without any specific head on top.', IMAGEGPT_START_DOCSTRING) class ImageGPTModel(ImageGPTPreTrainedModel): def __init__(self, config: ImageGPTConfig): super().__init__(config) self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([ImageGPTBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = ImageGPTLayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.model_parallel = False self.device_map = None self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs: Any) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: if 'pixel_values' in kwargs: warnings.warn('The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids` instead.', FutureWarning) if input_ids is not None: raise ValueError('You cannot pass both `pixel_values` and `input_ids`. 
Please make sure to only pass `input_ids`.') input_ids = kwargs.pop('pixel_values') output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError('You have to specify either input_ids or inputs_embeds') device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) if attention_mask is not None: if batch_size <= 0: raise ValueError('batch_size has to be defined and > 0') attention_mask = attention_mask.view(batch_size, -1) attention_mask = attention_mask[:, None, None, :] attention_mask = attention_mask.to(dtype=self.dtype) attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min if self.config.add_cross_attention and encoder_hidden_states is not None: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
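# --- Editor's illustrative sketch (not part of the original file) ---
# Behaviour of the additive attention mask built in ImageGPTModel.forward above: padding
# positions (0 in the user-supplied mask) become a large negative bias, so the softmax
# inside attention gives them effectively zero weight. Standalone torch; toy sizes only.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])                  # last position is padding
extended = attention_mask[:, None, None, :].to(torch.float32)  # (batch, 1, 1, seq_len)
extended = (1.0 - extended) * torch.finfo(torch.float32).min   # 1 -> 0.0, 0 -> very negative

scores = torch.zeros(1, 1, 4, 4)                               # dummy pre-softmax scores
probs = torch.softmax(scores + extended, dim=-1)
print(probs[0, 0, 0])                                          # last entry ~0: padded position ignored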
Setting `use_cache=False`...') use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for (i, (block, layer_past)) in enumerate(zip(self.h, past_key_values)): if self.model_parallel: torch.cuda.set_device(hidden_states.device) if layer_past is not None: layer_past = tuple((past_state.to(hidden_states.device) for past_state in layer_past)) if attention_mask is not None: attention_mask = attention_mask.to(hidden_states.device) if isinstance(head_mask, torch.Tensor): head_mask = head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func(block.__call__, hidden_states, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions) else: outputs = block(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) if self.model_parallel: for (k, v) in self.device_map.items(): if i == v[-1] and 'cuda:' + str(k) != self.last_device: hidden_states = hidden_states.to('cuda:' + str(k + 1)) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(*output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) @add_start_docstrings('\n The ImageGPT Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ', IMAGEGPT_START_DOCSTRING) class ImageGPTForCausalImageModeling(ImageGPTPreTrainedModel): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config: ImageGPTConfig): super().__init__(config) self.transformer = ImageGPTModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size - 1, bias=False) self.model_parallel = False self.device_map = None self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, input_ids: torch.Tensor, past_key_values: Optional[bool]=None, **kwargs): token_type_ids = kwargs.get('token_type_ids', None) if past_key_values: past_length = past_key_values[0][0].shape[2] if input_ids.shape[1] > past_length: remove_prefix_length = past_length else: remove_prefix_length = input_ids.shape[1] - 1 input_ids = input_ids[:, remove_prefix_length:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -input_ids.shape[1]:] attention_mask = kwargs.get('attention_mask', None) position_ids = 
kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -input_ids.shape[1]:] else: position_ids = None return {'input_ids': input_ids, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache'), 'position_ids': position_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids} @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs: Any) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: if 'pixel_values' in kwargs: warnings.warn('The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids` instead.', FutureWarning) if input_ids is not None: raise ValueError('You cannot pass both `pixel_values` and `input_ids`. Please make sure to only pass `input_ids`.') input_ids = kwargs.pop('pixel_values') return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions) @staticmethod def _reorder_cache(past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: return tuple((tuple((past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)) for layer_past in past_key_values)) @add_start_docstrings('\n The ImageGPT Model transformer with an image classification head on top (linear layer).\n [`ImageGPTForImageClassification`] average-pools the hidden states in order to do the classification.\n ', IMAGEGPT_START_DOCSTRING) class ImageGPTForImageClassification(ImageGPTPreTrainedModel): def 
__init__(self, config: ImageGPTConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = ImageGPTModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) self.post_init() @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs: Any) -> Union[Tuple, SequenceClassifierOutputWithPast]: if 'pixel_values' in kwargs: warnings.warn('The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids` instead.', FutureWarning) if input_ids is not None: raise ValueError('You cannot pass both `pixel_values` and `input_ids`. Please make sure to only pass `input_ids`.') input_ids = kwargs.pop('pixel_values') return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] pooled_hidden_states = hidden_states.mean(dim=1) logits = self.score(pooled_hidden_states) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) # File: transformers-main/src/transformers/models/informer/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_informer': ['InformerConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_informer'] = ['InformerForPrediction', 'InformerModel', 'InformerPreTrainedModel'] if TYPE_CHECKING: from 
.configuration_informer import InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import InformerForPrediction, InformerModel, InformerPreTrainedModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/informer/configuration_informer.py """""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class InformerConfig(PretrainedConfig): model_type = 'informer' attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers'} def __init__(self, prediction_length: Optional[int]=None, context_length: Optional[int]=None, distribution_output: str='student_t', loss: str='nll', input_size: int=1, lags_sequence: List[int]=None, scaling: Optional[Union[str, bool]]='mean', num_dynamic_real_features: int=0, num_static_real_features: int=0, num_static_categorical_features: int=0, num_time_features: int=0, cardinality: Optional[List[int]]=None, embedding_dimension: Optional[List[int]]=None, d_model: int=64, encoder_ffn_dim: int=32, decoder_ffn_dim: int=32, encoder_attention_heads: int=2, decoder_attention_heads: int=2, encoder_layers: int=2, decoder_layers: int=2, is_encoder_decoder: bool=True, activation_function: str='gelu', dropout: float=0.05, encoder_layerdrop: float=0.1, decoder_layerdrop: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, num_parallel_samples: int=100, init_std: float=0.02, use_cache=True, attention_type: str='prob', sampling_factor: int=5, distil: bool=True, **kwargs): self.prediction_length = prediction_length self.context_length = context_length or prediction_length self.distribution_output = distribution_output self.loss = loss self.input_size = input_size self.num_time_features = num_time_features self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] self.scaling = scaling self.num_dynamic_real_features = num_dynamic_real_features self.num_static_real_features = num_static_real_features self.num_static_categorical_features = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(cardinality) != num_static_categorical_features: raise ValueError('The cardinality should be a list of the same length as `num_static_categorical_features`') self.cardinality = cardinality else: self.cardinality = [0] if embedding_dimension and num_static_categorical_features > 0: if len(embedding_dimension) != num_static_categorical_features: raise ValueError('The embedding dimension should be a list of the same length as `num_static_categorical_features`') self.embedding_dimension = embedding_dimension else: self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality] self.num_parallel_samples = num_parallel_samples self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features self.d_model = d_model self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.encoder_ffn_dim = encoder_ffn_dim self.decoder_ffn_dim = decoder_ffn_dim self.encoder_layers = encoder_layers self.decoder_layers = decoder_layers self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = 
activation_dropout self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.activation_function = activation_function self.init_std = init_std self.use_cache = use_cache self.attention_type = attention_type self.sampling_factor = sampling_factor self.distil = distil super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def _number_of_features(self) -> int: return sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # File: transformers-main/src/transformers/models/informer/modeling_informer.py """""" from typing import List, Optional, Tuple, Union import numpy as np import torch from torch import nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, SampleTSPredictionOutput, Seq2SeqTSModelOutput, Seq2SeqTSPredictionOutput from ...modeling_utils import PreTrainedModel from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_informer import InformerConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = 'InformerConfig' class InformerFeatureEmbedder(nn.Module): def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None: super().__init__() self.num_features = len(cardinalities) self.embedders = nn.ModuleList([nn.Embedding(c, d) for (c, d) in zip(cardinalities, embedding_dims)]) def forward(self, features: torch.Tensor) -> torch.Tensor: if self.num_features > 1: cat_feature_slices = torch.chunk(features, self.num_features, dim=-1) else: cat_feature_slices = [features] return torch.cat([embed(cat_feature_slice.squeeze(-1)) for (embed, cat_feature_slice) in zip(self.embedders, cat_feature_slices)], dim=-1) class InformerStdScaler(nn.Module): def __init__(self, config: InformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1 self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-05 def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return ((data - loc) / scale, loc, scale) class InformerMeanScaler(nn.Module): def __init__(self, config: InformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1 self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-10 self.default_scale = config.default_scale if hasattr(config, 'default_scale') else None def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, 
keepdim=True) scale = ts_sum / torch.clamp(num_observed, min=1) if self.default_scale is None: batch_sum = ts_sum.sum(dim=0) batch_observations = torch.clamp(num_observed.sum(0), min=1) default_scale = torch.squeeze(batch_sum / batch_observations) else: default_scale = self.default_scale * torch.ones_like(scale) scale = torch.where(num_observed > 0, scale, default_scale) scale = torch.clamp(scale, min=self.minimum_scale) scaled_data = data / scale if not self.keepdim: scale = scale.squeeze(dim=self.dim) return (scaled_data, torch.zeros_like(scale), scale) class InformerNOPScaler(nn.Module): def __init__(self, config: InformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1 self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor=None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return (data, loc, scale) def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor]=None, dim=None) -> torch.Tensor: if weights is not None: weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights else: return input_tensor.mean(dim=dim) def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: return -input.log_prob(target) class InformerSinusoidalPositionalEmbedding(nn.Embedding): def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None: super().__init__(num_positions, embedding_dim) self.weight = self._init_weight(self.weight) @staticmethod def _init_weight(out: nn.Parameter) -> nn.Parameter: (n_pos, dim) = out.shape position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out.requires_grad = False sentinel = dim // 2 if dim % 2 == 0 else dim // 2 + 1 out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() return out @torch.no_grad() def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0) -> torch.Tensor: (bsz, seq_len) = input_ids_shape[:2] positions = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device) return super().forward(positions) class InformerValueEmbedding(nn.Module): def __init__(self, feature_size, d_model): super().__init__() self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False) def forward(self, x): return self.value_projection(x) class InformerAttention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[InformerConfig]=None): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) 
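# --- Editor's illustrative sketch (not part of the original file) ---
# What InformerMeanScaler (completed just above) does on a toy batch: each series is divided
# by the mean absolute value of its observed points, and that scale is returned so model
# outputs can be mapped back to the original magnitude. Standalone torch; dim=1,
# keepdim=True and no default_scale fallback are simplifying assumptions.
import torch

data = torch.tensor([[[1.0], [3.0], [0.0], [4.0]]])        # (batch=1, time=4, input_size=1)
observed = torch.tensor([[[1.0], [1.0], [0.0], [1.0]]])    # third step is missing

ts_sum = (data * observed).abs().sum(dim=1, keepdim=True)  # 1 + 3 + 4 = 8
num_observed = observed.sum(dim=1, keepdim=True)           # 3 observed points
scale = ts_sum / num_observed.clamp(min=1)                 # mean |value| = 8 / 3
print(scale.squeeze(), (data / scale).squeeze())           # the loc returned by the scaler is zero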
self.is_decoder = is_decoder self.is_causal = is_causal self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should 
be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) class InformerProbSparseAttention(nn.Module): def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, sampling_factor: int=5, bias: bool=True): super().__init__() self.factor = sampling_factor self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: is_cross_attention = key_value_states is not None (bsz, tgt_len, _) = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling if is_cross_attention and past_key_value is not None and (past_key_value[0].shape[2] == key_value_states.shape[1]): key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) key_states_time_length = key_states.size(1) log_key_states_time_length = np.ceil(np.log1p(key_states_time_length)).astype('int').item() query_states_time_length = query_states.size(1) log_query_states_time_length = np.ceil(np.log1p(query_states_time_length)).astype('int').item() u_part = min(self.factor * query_states_time_length * log_key_states_time_length, key_states_time_length) u = min(self.factor * log_query_states_time_length, query_states_time_length) if key_states_time_length > 0: index_sample = torch.randint(0, key_states_time_length, (u_part,)) k_sample = key_states[:, index_sample, :] else: k_sample = key_states queries_keys_sample = 
torch.bmm(query_states, k_sample.transpose(1, 2)) if u > 0: sparsity_measurement = queries_keys_sample.max(dim=-1)[0] - torch.div(queries_keys_sample.sum(dim=-1), key_states_time_length) top_u_sparsity_measurement = sparsity_measurement.topk(u, sorted=False)[1] dim_for_slice = torch.arange(query_states.size(0)).unsqueeze(-1) q_reduce = query_states[dim_for_slice, top_u_sparsity_measurement] else: q_reduce = query_states top_u_sparsity_measurement = None attn_weights = torch.bmm(q_reduce, key_states.transpose(1, 2)) src_len = key_states.size(1) if attn_weights.size() != (bsz * self.num_heads, u, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, u, src_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') prob_mask = attention_mask.expand(bsz, self.num_heads, tgt_len, src_len).reshape(bsz * self.num_heads, tgt_len, src_len) if top_u_sparsity_measurement is not None: dim_for_slice = torch.arange(prob_mask.size(0)).unsqueeze(-1) prob_mask = prob_mask[dim_for_slice, top_u_sparsity_measurement, :] attn_weights = attn_weights.view(bsz, self.num_heads, u, src_len) + prob_mask.view(bsz, self.num_heads, u, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, u, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}') attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, u, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, u, src_len) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, u, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, u, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if self.is_decoder: context = value_states.cumsum(dim=-2, dtype=torch.float32).to(value_states.dtype) else: v_mean_dim_time = value_states.mean(dim=-2) context = v_mean_dim_time.unsqueeze(dim=1).expand(bsz * self.num_heads, query_states_time_length, v_mean_dim_time.size(-1)).clone() if top_u_sparsity_measurement is not None: dim_for_slice = torch.arange(context.size(0)).unsqueeze(-1) context[dim_for_slice, top_u_sparsity_measurement, :] = attn_output attn_output = context if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped, past_key_value) class InformerConvLayer(nn.Module): def __init__(self, c_in): super().__init__() self.downConv = nn.Conv1d(in_channels=c_in, out_channels=c_in, kernel_size=3, padding=1, padding_mode='circular') self.norm = nn.BatchNorm1d(c_in) self.activation = nn.ELU() self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1) def forward(self, x): x = self.downConv(x.permute(0, 2, 1)) x = self.norm(x) x = 
self.activation(x) x = self.maxPool(x) x = x.transpose(1, 2) return x class InformerEncoderLayer(nn.Module): def __init__(self, config: InformerConfig): super().__init__() self.embed_dim = config.d_model if config.attention_type == 'prob': self.self_attn = InformerProbSparseAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, sampling_factor=config.sampling_factor) else: self.self_attn = InformerAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: residual = hidden_states (hidden_states, attn_weights, _) = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class InformerDecoderLayer(nn.Module): def __init__(self, config: InformerConfig): super().__init__() self.embed_dim = config.d_model if config.attention_type == 'prob': self.self_attn = InformerProbSparseAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, sampling_factor=config.sampling_factor, is_decoder=True) else: self.self_attn = InformerAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = InformerAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: 
Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None (hidden_states, self_attn_weights, present_key_value) = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None (hidden_states, cross_attn_weights, cross_attn_present_key_value) = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) present_key_value = present_key_value + cross_attn_present_key_value residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class InformerPreTrainedModel(PreTrainedModel): config_class = InformerConfig base_model_prefix = 'model' main_input_name = 'past_values' supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding) and (not isinstance(module, InformerSinusoidalPositionalEmbedding)): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() INFORMER_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. 
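# --- Editor's illustrative sketch (not part of the original file) ---
# The query-selection idea behind InformerProbSparseAttention defined earlier in this file:
# score each query by max_j(q_i . k_j) - mean_j(q_i . k_j) and keep only the top-u queries,
# since queries with a flat score distribution behave like a uniform average. The real
# module estimates this measurement on a random sample of keys; the toy tensors and fixed
# u=4 below are assumptions.
import torch

queries = torch.randn(1, 16, 8)                     # (batch * heads, query_len, head_dim)
keys = torch.randn(1, 16, 8)

scores = torch.bmm(queries, keys.transpose(1, 2))   # (1, 16, 16) attention logits
sparsity = scores.max(dim=-1).values - scores.mean(dim=-1)
u = 4
top_u = sparsity.topk(u, dim=-1).indices            # indices of the u "active" queries
q_reduce = queries[torch.arange(1).unsqueeze(-1), top_u]
print(top_u, q_reduce.shape)                        # only these queries attend: torch.Size([1, 4, 8])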
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`TimeSeriesTransformerConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' INFORMER_INPUTS_DOCSTRING = '\n Args:\n past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):\n Past values of the time series, that serve as context in order to predict the future. The sequence size of\n this tensor must be larger than the `context_length` of the model, since the model will use the larger size\n to construct lag features, i.e. additional values from the past which are added in order to serve as "extra\n context".\n\n The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no\n `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest\n look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of\n the past.\n\n The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as\n `static_categorical_features`, `static_real_features`, `past_time_features` and lags).\n\n Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.\n\n For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of\n variates in the time series per time step.\n past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):\n Required time features, which the model internally will add to `past_values`. These could be things like\n "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These\n could also be so-called "age" features, which basically help the model know "at which point in life" a\n time-series is. Age features have small values for distant past time steps and increase monotonically the\n more we approach the current time step. Holiday features are also a good example of time features.\n\n These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where\n the position encodings are learned from scratch internally as parameters of the model, the Time Series\n Transformer requires to provide additional time features. The Time Series Transformer only learns\n additional embeddings for `static_categorical_features`.\n\n Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features\n must but known at prediction time.\n\n The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.\n past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):\n Boolean mask to indicate which `past_values` were observed and which were missing. 
Mask values selected in\n `[0, 1]`:\n\n - 1 for values that are **observed**,\n - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).\n\n static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):\n Optional static categorical features for which the model will learn an embedding, which it will add to the\n values of the time series.\n\n Static categorical features are features which have the same value for all time steps (static over time).\n\n A typical example of a static categorical feature is a time series ID.\n static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):\n Optional static real features which the model will add to the values of the time series.\n\n Static real features are features which have the same value for all time steps (static over time).\n\n A typical example of a static real feature is promotion information.\n future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):\n Future values of the time series, that serve as labels for the model. The `future_values` is what the\n Transformer needs during training to learn to output, given the `past_values`.\n\n The sequence length here is equal to `prediction_length`.\n\n See the demo notebook and code snippets for details.\n\n Optionally, during training any missing values need to be replaced with zeros and indicated via the\n `future_observed_mask`.\n\n For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of\n variates in the time series per time step.\n future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):\n Required time features for the prediction window, which the model internally will add to `future_values`.\n These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as\n Fourier features). These could also be so-called "age" features, which basically help the model know "at\n which point in life" a time-series is. Age features have small values for distant past time steps and\n increase monotonically the more we approach the current time step. Holiday features are also a good example\n of time features.\n\n These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where\n the position encodings are learned from scratch internally as parameters of the model, the Time Series\n Transformer requires to provide additional time features. The Time Series Transformer only learns\n additional embeddings for `static_categorical_features`.\n\n Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features\n must but known at prediction time.\n\n The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.\n future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):\n Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected\n in `[0, 1]`:\n\n - 1 for values that are **observed**,\n - 0 for values that are **missing** (i.e. 
NaNs that were replaced by zeros).\n\n This mask is used to filter out missing values for the final loss calculation.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on certain token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Mask to avoid performing attention on certain token indices. By default, a causal mask will be used, to\n make sure the model can only look at previous inputs in order to predict the future.\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don\'t have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
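# --- Editor's illustrative sketch (not part of the original file) ---
# How the input shapes described in the docstring above fit together. InformerConfig is the
# real class; the concrete numbers are arbitrary assumptions. The past window has to cover
# config.context_length plus the largest lag so the model can build its lag features
# (training additionally needs future_values / future_time_features of length prediction_length).
import torch
from transformers import InformerConfig

config = InformerConfig(prediction_length=12, context_length=24)
past_length = config.context_length + max(config.lags_sequence)    # 24 + 7 = 31

batch_size = 2
num_time_features = config.num_time_features or 1                  # assume at least one time feature
past_values = torch.randn(batch_size, past_length)                 # univariate series
past_time_features = torch.randn(batch_size, past_length, num_time_features)
past_observed_mask = torch.ones(batch_size, past_length)
print(past_values.shape, past_time_features.shape)                 # (2, 31) and (2, 31, 1)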
This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model\'s internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class InformerEncoder(InformerPreTrainedModel): def __init__(self, config: InformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.gradient_checkpointing = False if config.prediction_length is None: raise ValueError('The `prediction_length` config needs to be specified.') self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model) self.embed_positions = InformerSinusoidalPositionalEmbedding(config.context_length + config.prediction_length, config.d_model) self.layers = nn.ModuleList([InformerEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) if config.distil: self.conv_layers = nn.ModuleList([InformerConvLayer(config.d_model) for _ in range(config.encoder_layers - 1)]) self.conv_layers.append(None) else: self.conv_layers = [None] * config.encoder_layers self.post_init() def forward(self, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions(inputs_embeds.size()) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, (encoder_layer, conv_layer)) in enumerate(zip(self.layers, self.conv_layers)): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask 
is not None else None, output_attentions) if conv_layer is not None: output = self._gradient_checkpointing_func(conv_layer, layer_outputs[0]) layer_outputs = (output,) + layer_outputs[1:] else: layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions) if conv_layer is not None: output = conv_layer(layer_outputs[0]) layer_outputs = (output,) + layer_outputs[1:] hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class InformerDecoder(InformerPreTrainedModel): def __init__(self, config: InformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop if config.prediction_length is None: raise ValueError('The `prediction_length` config needs to be specified.') self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model) self.embed_positions = InformerSinusoidalPositionalEmbedding(config.context_length + config.prediction_length, config.d_model) self.layers = nn.ModuleList([InformerDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False self.post_init() def forward(self, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_shape = inputs_embeds.size()[:-1] past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None next_decoder_cache = () if use_cache else None for (attn_mask, mask_name) in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for (idx, decoder_layer) in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache) else: layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) @add_start_docstrings('The bare Informer Model outputting raw hidden-states without any specific head on top.', INFORMER_START_DOCSTRING) class InformerModel(InformerPreTrainedModel): def __init__(self, config: InformerConfig): super().__init__(config) if config.scaling == 'mean' or config.scaling is True: self.scaler = InformerMeanScaler(config) elif config.scaling == 'std': self.scaler = InformerStdScaler(config) else: self.scaler = InformerNOPScaler(config) if config.num_static_categorical_features > 0: self.embedder = InformerFeatureEmbedder(cardinalities=config.cardinality, embedding_dims=config.embedding_dimension) self.encoder = InformerEncoder(config) self.decoder = InformerDecoder(config) self.post_init() @property def _past_length(self) -> int: return self.config.context_length + max(self.config.lags_sequence) def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor: sequence_length = sequence.shape[1] indices = [lag - shift for lag in self.config.lags_sequence] if max(indices) + subsequences_length > sequence_length: raise 
ValueError(f'lags cannot go further than history length, found lag {max(indices)} while history length is only {sequence_length}') lagged_values = [] for lag_index in indices: begin_index = -lag_index - subsequences_length end_index = -lag_index if lag_index > 0 else None lagged_values.append(sequence[:, begin_index:end_index, ...]) return torch.stack(lagged_values, dim=-1) def create_network_inputs(self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None): time_feat = torch.cat((past_time_features[:, self._past_length - self.config.context_length:, ...], future_time_features), dim=1) if future_values is not None else past_time_features[:, self._past_length - self.config.context_length:, ...] if past_observed_mask is None: past_observed_mask = torch.ones_like(past_values) context = past_values[:, -self.config.context_length:] observed_context = past_observed_mask[:, -self.config.context_length:] (_, loc, scale) = self.scaler(context, observed_context) inputs = (torch.cat((past_values, future_values), dim=1) - loc) / scale if future_values is not None else (past_values - loc) / scale log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p() log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log() static_feat = torch.cat((log_abs_loc, log_scale), dim=1) if static_real_features is not None: static_feat = torch.cat((static_real_features, static_feat), dim=1) if static_categorical_features is not None: embedded_cat = self.embedder(static_categorical_features) static_feat = torch.cat((embedded_cat, static_feat), dim=1) expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1) features = torch.cat((expanded_static_feat, time_feat), dim=-1) subsequences_length = self.config.context_length + self.config.prediction_length if future_values is not None else self.config.context_length lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length) lags_shape = lagged_sequence.shape reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]: raise ValueError(f'input length {reshaped_lagged_sequence.shape[1]} and time feature lengths {time_feat.shape[1]} does not match') transformer_inputs = torch.cat((reshaped_lagged_sequence, features), dim=-1) return (transformer_inputs, loc, scale, static_feat) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(INFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, past_key_values: 
Optional[List[torch.FloatTensor]]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Seq2SeqTSModelOutput, Tuple]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict (transformer_inputs, loc, scale, static_feat) = self.create_network_inputs(past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features) if encoder_outputs is None: enc_input = transformer_inputs[:, :self.config.context_length, ...] encoder_outputs = self.encoder(inputs_embeds=enc_input, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) dec_input = transformer_inputs[:, self.config.context_length:, ...] decoder_outputs = self.decoder(inputs_embeds=dec_input, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return decoder_outputs + encoder_outputs + (loc, scale, static_feat) return Seq2SeqTSModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, loc=loc, scale=scale, static_features=static_feat) @add_start_docstrings('The Informer Model with a distribution head on top for time-series forecasting.', INFORMER_START_DOCSTRING) class InformerForPrediction(InformerPreTrainedModel): def __init__(self, config: InformerConfig): super().__init__(config) self.model = InformerModel(config) if config.distribution_output == 'student_t': self.distribution_output = StudentTOutput(dim=config.input_size) elif config.distribution_output == 'normal': self.distribution_output = NormalOutput(dim=config.input_size) elif config.distribution_output == 'negative_binomial': self.distribution_output = NegativeBinomialOutput(dim=config.input_size) else: raise ValueError(f'Unknown distribution output {config.distribution_output}') self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model) self.target_shape = self.distribution_output.event_shape if config.loss == 'nll': self.loss = nll else: raise ValueError(f'Unknown loss function {config.loss}') self.post_init() def 
output_params(self, dec_output): return self.parameter_projection(dec_output) def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() @torch.jit.ignore def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution: sliced_params = params if trailing_n is not None: sliced_params = [p[:, -trailing_n:] for p in params] return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale) @add_start_docstrings_to_model_forward(INFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqTSModelOutput, config_class=_CONFIG_FOR_DOC) def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, future_observed_mask: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Seq2SeqTSModelOutput, Tuple]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict if future_values is not None: use_cache = False outputs = self.model(past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, use_cache=use_cache, return_dict=return_dict) prediction_loss = None params = None if future_values is not None: params = self.output_params(outputs[0]) distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2]) loss = self.loss(distribution, future_values) if future_observed_mask is None: future_observed_mask = torch.ones_like(future_values) if len(self.target_shape) == 0: loss_weights = future_observed_mask else: (loss_weights, _) = future_observed_mask.min(dim=-1, keepdim=False) prediction_loss = weighted_average(loss, weights=loss_weights) if not return_dict: outputs = (params,) + outputs[1:] if params is not None else outputs[1:] return (prediction_loss,) + outputs if prediction_loss is not None else outputs return Seq2SeqTSPredictionOutput(loss=prediction_loss, params=params, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, loc=outputs.loc, scale=outputs.scale, static_features=outputs.static_features) @torch.no_grad() def generate(self, past_values: 
torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> SampleTSPredictionOutput: outputs = self(static_categorical_features=static_categorical_features, static_real_features=static_real_features, past_time_features=past_time_features, past_values=past_values, past_observed_mask=past_observed_mask, future_time_features=future_time_features, future_values=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, use_cache=True) decoder = self.model.get_decoder() enc_last_hidden = outputs.encoder_last_hidden_state loc = outputs.loc scale = outputs.scale static_feat = outputs.static_features num_parallel_samples = self.config.num_parallel_samples repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_past_values = (past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc) / repeated_scale expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1) features = torch.cat((expanded_static_feat, future_time_features), dim=-1) repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0) future_samples = [] for k in range(self.config.prediction_length): lagged_sequence = self.model.get_lagged_subsequences(sequence=repeated_past_values, subsequences_length=1 + k, shift=1) lags_shape = lagged_sequence.shape reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, :k + 1]), dim=-1) dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden) dec_last_hidden = dec_output.last_hidden_state params = self.parameter_projection(dec_last_hidden[:, -1:]) distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale) next_sample = distr.sample() repeated_past_values = torch.cat((repeated_past_values, (next_sample - repeated_loc) / repeated_scale), dim=1) future_samples.append(next_sample) concat_future_samples = torch.cat(future_samples, dim=1) return SampleTSPredictionOutput(sequences=concat_future_samples.reshape((-1, num_parallel_samples, self.config.prediction_length) + self.target_shape)) # File: transformers-main/src/transformers/models/instructblip/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {'configuration_instructblip': ['InstructBlipConfig', 'InstructBlipQFormerConfig', 'InstructBlipVisionConfig'], 'processing_instructblip': ['InstructBlipProcessor']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_instructblip'] = ['InstructBlipQFormerModel', 'InstructBlipPreTrainedModel', 'InstructBlipForConditionalGeneration', 'InstructBlipVisionModel'] if TYPE_CHECKING: from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig from .processing_instructblip import InstructBlipProcessor try: if not 
is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/instructblip/configuration_instructblip.py """""" import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class InstructBlipVisionConfig(PretrainedConfig): model_type = 'instructblip_vision_model' def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.qkv_bias = qkv_bias @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'instructblip': config_dict = config_dict['vision_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. 
This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class InstructBlipQFormerConfig(PretrainedConfig): model_type = 'instructblip_qformer' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.cross_attention_frequency = cross_attention_frequency self.encoder_hidden_size = encoder_hidden_size @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'instructblip': config_dict = config_dict['qformer_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class InstructBlipConfig(PretrainedConfig): model_type = 'instructblip' def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, image_token_index=None, **kwargs): super().__init__(**kwargs) if vision_config is None: vision_config = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.') if qformer_config is None: qformer_config = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.') if text_config is None: text_config = {} logger.info('text_config is None. 
Initializing the text config with default values (`OPTConfig`).') self.vision_config = InstructBlipVisionConfig(**vision_config) self.qformer_config = InstructBlipQFormerConfig(**qformer_config) text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt' self.text_config = CONFIG_MAPPING[text_model_type](**text_config) self.tie_word_embeddings = self.text_config.tie_word_embeddings self.is_encoder_decoder = self.text_config.is_encoder_decoder self.num_query_tokens = num_query_tokens self.image_token_index = image_token_index self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES self.initializer_factor = 1.0 self.initializer_range = 0.02 @classmethod def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs): return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs) # File: transformers-main/src/transformers/models/instructblip/convert_instructblip_original_to_pytorch.py """""" import argparse import requests import torch from lavis.models import load_model_and_preprocess from PIL import Image from transformers import AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, T5Config, T5TokenizerFast from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def load_demo_image(): url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg' image = Image.open(requests.get(url, stream=True).raw).convert('RGB') return image def create_rename_keys(config): rename_keys = [] rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding')) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding')) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight')) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias')) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight')) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias')) for i in range(config.vision_config.num_hidden_layers): rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight')) 
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias')) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight')) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias')) return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def read_in_q_v_bias(state_dict, config): for i in range(config.vision_config.num_hidden_layers): q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias') v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias') qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)) state_dict[f'vision_model.encoder.layers.{i}.self_attn.qkv.bias'] = qkv_bias def get_blip2_config(model_name): image_size = 364 if 'coco' in model_name else 224 vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict() if 't5-xl' in model_name: text_config = T5Config.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict() elif 't5-xxl' in model_name: text_config = T5Config.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict() elif 'vicuna-7b' in model_name: text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf', vocab_size=32001).to_dict() elif 'vicuna-13b' in model_name: text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf', vocab_size=32001).to_dict() else: raise ValueError('Model name not supported') qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict() config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config) return (config, image_size) @torch.no_grad() def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): qformer_tokenizer = AutoTokenizer.from_pretrained('google-bert/bert-base-uncased', truncation_side='left') qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'}) if 't5' in model_name: tokenizer = T5TokenizerFast.from_pretrained('google/flan-t5-xl', truncation_side='left') elif 'vicuna' in model_name: tokenizer = LlamaTokenizerFast.from_pretrained('huggyllama/llama-7b', truncation_side='left', bos_token='', unk_token='') tokenizer.add_special_tokens({'pad_token': '[PAD]'}) (config, image_size) = get_blip2_config(model_name) hf_model = InstructBlipForConditionalGeneration(config).eval() model_name_to_original = {'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'), 'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'), 'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'), 'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl')} (name, type) = model_name_to_original[model_name] print('Loading original model...') hf_model_device = 'cuda:1' if torch.cuda.is_available() else 'cpu' lavis_device = 'cuda:2' if torch.cuda.is_available() else 'cpu' (original_model, vis_processors, _) = load_model_and_preprocess(name=name, model_type=type, is_eval=True, device=lavis_device) original_model.eval() print('Done!') state_dict = original_model.state_dict() rename_keys = create_rename_keys(config) for (src, dest) in rename_keys: rename_key(state_dict, src, dest) for (key, val) in state_dict.copy().items(): 
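# remap every remaining original key to its Hugging Face counterpart: Q-Former keys lose the 'Qformer.bert'
# prefix, 'attention.self' collapses to 'attention', and the language-model branch ('llm_*' for Vicuna,
# 't5*' for Flan-T5) is renamed to 'language_model' / 'language_projection'.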
val = state_dict.pop(key) if key.startswith('Qformer.bert'): key = key.replace('Qformer.bert', 'qformer') if 'attention.self' in key: key = key.replace('self', 'attention') if 'llm_proj' in key: key = key.replace('llm_proj', 'language_projection') if 't5_proj' in key: key = key.replace('t5_proj', 'language_projection') if key.startswith('llm_model'): key = key.replace('llm_model', 'language_model') if key.startswith('t5'): key = key.replace('t5', 'language') state_dict[key] = val read_in_q_v_bias(state_dict, config) hf_model.load_state_dict(state_dict, strict=True) image = load_demo_image() prompt = 'What is unusual about this image?' image_processor = BlipImageProcessor(size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD) processor = InstructBlipProcessor(image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer) inputs = processor(images=image, text=prompt, return_tensors='pt').to(hf_model_device) original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(lavis_device) pixel_values = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values) original_model.to(lavis_device) hf_model.to(hf_model_device) with torch.no_grad(): if 'vicuna' in model_name: original_logits = original_model({'image': original_pixel_values, 'text_input': [prompt]}).logits logits = hf_model(**inputs).logits else: original_logits = original_model({'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']}).logits label_input_ids = tokenizer('\n', return_tensors='pt').input_ids.to(hf_model_device) labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100) logits = hf_model(**inputs, labels=labels).logits print('First values of original logits:', original_logits[0, :3, :3]) print('First values of HF logits:', logits[0, :3, :3]) assert original_logits.shape == logits.shape atol = 0.0001 if 'vicuna' in model_name else 1e-05 assert torch.allclose(original_logits.to(logits.device), logits, atol=atol) print('Looks ok!') print('Generating with original model...') original_outputs = original_model.generate({'image': original_pixel_values, 'prompt': prompt}, num_beams=5) print('Generating with HF model...') outputs = hf_model.generate(**inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1) if 'vicuna' in model_name: outputs[outputs == 0] = 2 print('Original generation:', original_outputs) output_text = processor.batch_decode(outputs, skip_special_tokens=True) output_text = [text.strip() for text in output_text] print('HF generation:', output_text) if pytorch_dump_folder_path is not None: processor.save_pretrained(pytorch_dump_folder_path) hf_model.save_pretrained(pytorch_dump_folder_path) if push_to_hub: processor.push_to_hub(f'Salesforce/{model_name}') hf_model.push_to_hub(f'Salesforce/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() choices = ['instructblip-vicuna-7b', 'instructblip-vicuna-13b', 'instructblip-flan-t5-xl', 'instructblip-flan-t5-xxl'] parser.add_argument('--model_name', default='instructblip-flan-t5-xl', choices=choices, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after 
converting') args = parser.parse_args() convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/instructblip/modeling_instructblip.py """""" import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'Salesforce/instructblip-flan-t5-xl' @dataclass class InstructBlipForConditionalGenerationModelOutput(ModelOutput): loss: Optional[Tuple[torch.FloatTensor]] = None logits: Optional[Tuple[torch.FloatTensor]] = None vision_outputs: Optional[torch.FloatTensor] = None qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys())) class InstructBlipVisionEmbeddings(nn.Module): def __init__(self, config: InstructBlipVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] - 1 num_positions = self.position_embedding.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embedding class_pos_embed = self.position_embedding[:, :1] patch_pos_embed = self.position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: (batch_size, _, height, width) = pixel_values.shape target_dtype = self.patch_embedding.weight.dtype
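# cast pixel values to the conv kernel's dtype (so fp16/bf16 checkpoints work), run the patch convolution,
# and flatten the (batch, embed_dim, h/patch, w/patch) feature map into a sequence of patch tokens before
# prepending the class embedding and adding the (optionally interpolated) position embeddings.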
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: position_embedding = self.interpolate_pos_encoding(embeddings, height, width) else: position_embedding = self.position_embedding embeddings = embeddings + position_embedding[:, :embeddings.size(1), :].to(target_dtype) return embeddings class InstructBlipAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = nn.Dropout(config.attention_dropout) self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False) if config.qkv_bias: q_bias = nn.Parameter(torch.zeros(self.embed_dim)) v_bias = nn.Parameter(torch.zeros(self.embed_dim)) else: q_bias = None v_bias = None if q_bias is not None: qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)) self.qkv.bias = nn.Parameter(qkv_bias) self.projection = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, tgt_len, embed_dim) = hidden_states.size() mixed_qkv = self.qkv(hidden_states) mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(2, 0, 3, 1, 4) (query_states, key_states, value_states) = (mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]) attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) attention_scores = attention_scores * self.scale attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3) new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,) context_layer = context_layer.reshape(new_context_layer_shape) output = self.projection(context_layer) outputs = (output, attention_probs) if output_attentions else (output, None) return outputs class InstructBlipMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class InstructBlipEncoderLayer(nn.Module): def __init__(self, config: InstructBlipConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = InstructBlipAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) 
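# pre-LayerNorm transformer block: layer_norm1 feeds self-attention and layer_norm2 feeds the MLP,
# with a residual connection around each sub-layer (see the forward method below).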
self.mlp = InstructBlipMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, head_mask=attention_mask, output_attentions=output_attentions) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = hidden_states + residual outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class InstructBlipPreTrainedModel(PreTrainedModel): config_class = InstructBlipConfig base_model_prefix = 'blip' supports_gradient_checkpointing = True _no_split_modules = ['InstructBlipQFormerEmbeddings', 'InstructBlipAttention', 'InstructBlipQFormerMultiHeadAttention', 'InstructBlipQFormerSelfOutput'] _keep_in_fp32_modules = [] def _init_weights(self, module): factor = self.config.initializer_range if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=factor) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() if isinstance(module, InstructBlipVisionEmbeddings): if hasattr(self.config, 'vision_config') and (not isinstance(self.config, InstructBlipVisionConfig)): factor = self.config.vision_config.initializer_range nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor) nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() INSTRUCTBLIP_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`InstructBlipConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' INSTRUCTBLIP_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`InstructBlipProcessor`]. See\n [`InstructBlipProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n Whether to interpolate the pre-trained position encodings.\n' INSTRUCTBLIP_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`InstructBlipProcessor`]. See\n [`InstructBlipProcessor.__call__`] for details.\n\n qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided\n to serve as text prompt, which the Q-Former model will encode.\n\n Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for\n details.\n\n [What are input IDs?](../glossary#input-ids)\n\n qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be\n provided to serve as text prompt, which the language model can continue.\n\n Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for\n details.\n\n [What are input IDs?](../glossary#input-ids)\n\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an\n encoder-decoder language model (like T5) is used.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n Only relevant in case an encoder-decoder language model (like T5) is used.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n Whether to interpolate the pre-trained position encodings.\n' class InstructBlipEncoder(nn.Module): def __init__(self, config: InstructBlipConfig): super().__init__() self.config = config self.layers = nn.ModuleList([InstructBlipEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) class InstructBlipVisionModel(InstructBlipPreTrainedModel): main_input_name = 'pixel_values' config_class = InstructBlipVisionConfig def __init__(self, config: InstructBlipVisionConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.embeddings = InstructBlipVisionEmbeddings(config) self.encoder = InstructBlipEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.post_init() @add_start_docstrings_to_model_forward(INSTRUCTBLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=InstructBlipVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def get_input_embeddings(self): return self.embeddings class InstructBlipQFormerMultiHeadAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size) self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.save_attention = False def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False): is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) mixed_query_layer = self.query(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if 
self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_scores_dtype = attention_scores.dtype if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores).to(attention_scores_dtype) if is_cross_attention and self.save_attention: self.save_attention_map(attention_probs) attention_probs.register_hook(self.save_attn_gradients) attention_probs_dropped = self.dropout(attention_probs) if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs class InstructBlipQFormerSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class InstructBlipQFormerAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.attention = InstructBlipQFormerMultiHeadAttention(config, is_cross_attention) self.output = InstructBlipQFormerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = 
self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.attention(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class InstructBlipQFormerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class InstructBlipQFormerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class InstructBlipQFormerLayer(nn.Module): def __init__(self, config, layer_idx): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = InstructBlipQFormerAttention(config) self.layer_idx = layer_idx if layer_idx % config.cross_attention_frequency == 0: self.crossattention = InstructBlipQFormerAttention(config, is_cross_attention=True) self.has_cross_attention = True else: self.has_cross_attention = False self.intermediate = InstructBlipQFormerIntermediate(config) self.output = InstructBlipQFormerOutput(config) self.intermediate_query = InstructBlipQFormerIntermediate(config) self.output_query = InstructBlipQFormerOutput(config) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, query_length=0): self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] if self.has_cross_attention: if encoder_hidden_states is None: raise ValueError('encoder_hidden_states must be given for cross-attention layers') cross_attention_outputs = self.crossattention(query_attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions) query_attention_output = 
cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] layer_output = apply_chunking_to_forward(self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output) if attention_output.shape[1] > query_length: layer_output_text = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[:, query_length:, :]) layer_output = torch.cat([layer_output, layer_output_text], dim=1) else: layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def feed_forward_chunk_query(self, attention_output): intermediate_output = self.intermediate_query(attention_output) layer_output = self.output_query(intermediate_output, attention_output) return layer_output class InstructBlipQFormerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([InstructBlipQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, query_length=0): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, 'gradient_checkpointing', False) and self.training: if use_cache: logger.warning('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...') use_cache = False layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, query_length) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if layer_module.has_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class InstructBlipQFormerEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.config = config def forward(self, input_ids=None, position_ids=None, query_embeds=None, past_key_values_length=0): if input_ids is not None: seq_length = input_ids.size()[1] else: seq_length = 0 if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length].clone() if input_ids is not None: embeddings = self.word_embeddings(input_ids) if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids.to(embeddings.device)) embeddings = embeddings + position_embeddings if query_embeds is not None: embeddings = torch.cat((query_embeds, embeddings), dim=1) else: embeddings = query_embeds embeddings = embeddings.to(self.layernorm.weight.dtype) embeddings = self.layernorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class InstructBlipQFormerModel(InstructBlipPreTrainedModel): def __init__(self, config: InstructBlipQFormerConfig): super().__init__(config) self.config = config self.embeddings = InstructBlipQFormerEmbeddings(config) self.encoder = InstructBlipQFormerEncoder(config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: Tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor: if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: extended_attention_mask = attention_mask[:, None, None, :] else: raise 
ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})') extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, query_embeds: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and query_embeds is None: raise ValueError('You have to specify query_embeds when input_ids is None') past_key_values_length = past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 query_length = query_embeds.shape[1] if query_embeds is not None else 0 embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, query_embeds=query_embeds, past_key_values_length=past_key_values_length) input_shape = embedding_output.size()[:-1] (batch_size, seq_length) = input_shape device = embedding_output.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states[0].size() else: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, query_length=query_length) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return 
BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('\n InstructBLIP Model for generating text given an image and an optional text prompt. The model consists of a vision\n encoder, Querying Transformer (Q-Former) and a language model.\n\n One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue\n the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.\n ', INSTRUCTBLIP_START_DOCSTRING) class InstructBlipForConditionalGeneration(InstructBlipPreTrainedModel): config_class = InstructBlipConfig main_input_name = 'pixel_values' def __init__(self, config: InstructBlipConfig): super().__init__(config) self.vision_model = InstructBlipVisionModel(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.qformer = InstructBlipQFormerModel(config.qformer_config) self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) if config.use_decoder_only_language_model: language_model = AutoModelForCausalLM.from_config(config.text_config, attn_implementation=config._attn_implementation) else: language_model = AutoModelForSeq2SeqLM.from_config(config.text_config, attn_implementation=config._attn_implementation) if language_model._no_split_modules is not None: self._no_split_modules.extend(language_model._no_split_modules) if language_model._keep_in_fp32_modules is not None: self._keep_in_fp32_modules.extend(language_model._keep_in_fp32_modules) self.language_model = language_model self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.language_model.get_output_embeddings() def get_encoder(self): return self.language_model.get_encoder() def get_decoder(self): return self.language_model.get_decoder() def _tie_weights(self): if not self.config.use_decoder_only_language_model: self.language_model.encoder.embed_tokens = self.language_model.shared self.language_model.decoder.embed_tokens = self.language_model.shared def _preprocess_accelerate(self): hf_device_map = self.hf_device_map if len(hf_device_map) > 1 and 'language_model' not in hf_device_map and (torch.cuda.device_count() > 1): logger.warning('The `language_model` is not in the `hf_device_map` dictionary and you are running your script in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`. Please pass a `device_map` that contains `language_model` to remove this warning. 
Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for more details on creating a `device_map` for large models.') if hasattr(self.language_model, '_hf_hook'): self.language_model._hf_hook.io_same_device = True @add_start_docstrings_to_model_forward(INSTRUCTBLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=InstructBlipForConditionalGenerationModelOutput, config_class=InstructBlipVisionConfig) def forward(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, InstructBlipForConditionalGenerationModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) query_output = query_outputs[0][:, :query_tokens.size(1), :] language_model_inputs = self.language_projection(query_output) language_model_attention_mask = torch.ones(language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device) inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) if getattr(self.config, 'image_token_index', None) is not None: special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1).expand_as(inputs_embeds) inputs_embeds[special_image_mask] = language_model_inputs.flatten() else: logger.warning_once('Expanding inputs for image tokens in InstructBLIP should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your InstructBLIP model. 
Using processors without these attributes in the config is deprecated and will throw an error in v4.47.') inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(language_model_attention_mask.device)], dim=1) if self.config.use_decoder_only_language_model: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: labels = labels.to(logits.device) logits = logits[:, -labels.size(1):, :] shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous().to(logits.device) loss_fct = CrossEntropyLoss(reduction='mean') loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) else: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels) loss = outputs.loss if return_dict else outputs[0] logits = outputs.logits if return_dict else outputs[1] if not return_dict: output = (logits, vision_outputs, query_outputs, outputs) return (loss,) + output if loss is not None else output return InstructBlipForConditionalGenerationModelOutput(loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs) @torch.no_grad() def generate(self, pixel_values: torch.FloatTensor, qformer_input_ids: Optional[torch.LongTensor]=None, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor: if hasattr(self, 'hf_device_map'): self._preprocess_accelerate() batch_size = pixel_values.shape[0] image_embeds = self.vision_model(pixel_values, return_dict=True, interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True) query_output = query_outputs.last_hidden_state[:, :query_tokens.size(1), :] language_model_inputs = self.language_projection(query_output) language_attention_mask = torch.ones(language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device) if input_ids is None: input_ids = torch.LongTensor([[self.config.text_config.bos_token_id]]).repeat(batch_size, 1).to(image_embeds.device) if attention_mask is None: attention_mask = torch.ones_like(input_ids) inputs_embeds = self.get_input_embeddings()(input_ids) if getattr(self.config, 
'image_token_index', None) is not None: special_image_mask = (input_ids == self.config.image_token_index).unsqueeze(-1).expand_as(inputs_embeds) inputs_embeds[special_image_mask] = language_model_inputs.flatten() else: logger.warning_once('Expanding inputs for image tokens in InstructBLIP should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your InstructBLIP model. Using processors without these attributes in the config is deprecated and will throw an error in v4.47.') inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1) if not self.language_model.config.is_encoder_decoder: generate_kwargs['max_length'] = generate_kwargs.get('max_length', 20) + language_model_inputs.shape[1] - 1 generate_kwargs['min_length'] = generate_kwargs.get('min_length', 0) + language_model_inputs.shape[1] outputs = self.language_model.generate(inputs_embeds=inputs_embeds, attention_mask=attention_mask, **generate_kwargs) if not self.language_model.config.is_encoder_decoder: bos_token_id = 2 if self.config.text_config.architectures[0] == 'LLaMAForCausalLM' else self.config.text_config.bos_token_id bos_tokens = torch.LongTensor([[bos_token_id]]).repeat(batch_size, 1).to(image_embeds.device) if not isinstance(outputs, torch.Tensor): outputs.sequences = torch.cat([bos_tokens, outputs.sequences], dim=-1) else: outputs = torch.cat([bos_tokens, outputs], dim=-1) return outputs # File: transformers-main/src/transformers/models/instructblip/processing_instructblip.py """""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import AddedToken, BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType, logging from ..auto import AutoTokenizer logger = logging.get_logger(__name__) class InstructBlipProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer', 'qformer_tokenizer'] valid_kwargs = ['num_query_tokens'] image_processor_class = 'BlipImageProcessor' tokenizer_class = 'AutoTokenizer' qformer_tokenizer_class = 'AutoTokenizer' def __init__(self, image_processor, tokenizer, qformer_tokenizer, num_query_tokens=None, **kwargs): self.image_token = AddedToken('', normalized=False, special=True) tokenizer.add_tokens([self.image_token], special_tokens=True) self.num_query_tokens = num_query_tokens super().__init__(image_processor, tokenizer, qformer_tokenizer) def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_token_type_ids: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchFeature: if images is None and text is None: raise ValueError('You have to specify at least images or text.') encoding = 
BatchFeature() if text is not None: if isinstance(text, str): text = [text] elif not isinstance(text, list) and (not isinstance(text[0], str)): raise ValueError('Invalid input text. Please provide a string, or a list of strings') _text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=None, **kwargs) if self.num_query_tokens is not None and images is not None: text_encoding = {} image_tokens = self.image_token.content * self.num_query_tokens image_token_encoding = self.tokenizer([image_tokens], add_special_tokens=False, return_tensors=None) for k in _text_encoding: text_encoding[k] = [img_encoding + txt_encoding for (img_encoding, txt_encoding) in zip(image_token_encoding[k], _text_encoding[k])] else: text_encoding = _text_encoding if images is not None: logger.warning_once('Expanding inputs for image tokens in InstructBLIP should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your InstructBLIP model. Using processors without these attributes in the config is deprecated and will throw an error in v4.47.') text_encoding = BatchEncoding(text_encoding, tensor_type=return_tensors) encoding.update(text_encoding) qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs) encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids') encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask') if images is not None: image_encoding = self.image_processor(images, return_tensors=return_tensors) encoding.update(image_encoding) return encoding def batch_decode(self, *args, **kwargs): return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) def save_pretrained(self, save_directory, **kwargs): if os.path.isfile(save_directory): raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file') os.makedirs(save_directory, exist_ok=True) qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer') self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path) qformer_present = 'qformer_tokenizer' in self.attributes if qformer_present: self.attributes.remove('qformer_tokenizer') outputs = super().save_pretrained(save_directory, **kwargs) if qformer_present: self.attributes += ['qformer_tokenizer'] return outputs @classmethod 
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): processor = super().from_pretrained(pretrained_model_name_or_path, **kwargs) if isinstance(processor, tuple): processor = processor[0] qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer') processor.qformer_tokenizer = qformer_tokenizer return processor # File: transformers-main/src/transformers/models/instructblipvideo/__init__.py from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _import_structure = {'configuration_instructblipvideo': ['InstructBlipVideoConfig', 'InstructBlipVideoQFormerConfig', 'InstructBlipVideoVisionConfig'], 'processing_instructblipvideo': ['InstructBlipVideoProcessor']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['image_processing_instructblipvideo'] = ['InstructBlipVideoImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure['modeling_instructblipvideo'] = ['InstructBlipVideoQFormerModel', 'InstructBlipVideoPreTrainedModel', 'InstructBlipVideoForConditionalGeneration', 'InstructBlipVideoVisionModel'] if TYPE_CHECKING: from .configuration_instructblipvideo import InstructBlipVideoConfig, InstructBlipVideoQFormerConfig, InstructBlipVideoVisionConfig from .processing_instructblipvideo import InstructBlipVideoProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_instructblipvideo import InstructBlipVideoImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblipvideo import InstructBlipVideoForConditionalGeneration, InstructBlipVideoPreTrainedModel, InstructBlipVideoQFormerModel, InstructBlipVideoVisionModel else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) # File: transformers-main/src/transformers/models/instructblipvideo/configuration_instructblipvideo.py import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class InstructBlipVideoVisionConfig(PretrainedConfig): model_type = 'instructblipvideo_vision_model' def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.qkv_bias = qkv_bias @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = 
cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'instructblipvideo': config_dict = config_dict['vision_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class InstructBlipVideoQFormerConfig(PretrainedConfig): model_type = 'instructblipvideo_qformer' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.cross_attention_frequency = cross_attention_frequency self.encoder_hidden_size = encoder_hidden_size @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig': cls._set_token_in_kwargs(kwargs) (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if config_dict.get('model_type') == 'instructblipvideo': config_dict = config_dict['qformer_config'] if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs) class InstructBlipVideoConfig(PretrainedConfig): model_type = 'instructblipvideo' def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, video_token_index=None, **kwargs): super().__init__(**kwargs) if vision_config is None: vision_config = {} logger.info('vision_config is None. initializing the InstructBlipVideoVisionConfig with default values.') if qformer_config is None: qformer_config = {} logger.info('qformer_config is None. Initializing the InstructBlipVideoQFormerConfig with default values.') if text_config is None: text_config = {} logger.info('text_config is None. 
Initializing the text config with default values (`OPTConfig`).') self.vision_config = InstructBlipVideoVisionConfig(**vision_config) self.qformer_config = InstructBlipVideoQFormerConfig(**qformer_config) text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt' self.text_config = CONFIG_MAPPING[text_model_type](**text_config) self.tie_word_embeddings = self.text_config.tie_word_embeddings self.is_encoder_decoder = self.text_config.is_encoder_decoder self.num_query_tokens = num_query_tokens self.video_token_index = video_token_index self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES self.initializer_factor = 1.0 self.initializer_range = 0.02 @classmethod def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVideoVisionConfig, qformer_config: InstructBlipVideoQFormerConfig, text_config: PretrainedConfig, **kwargs): return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs) # File: transformers-main/src/transformers/models/instructblipvideo/convert_instructblipvideo_original_to_pytorch.py """""" import argparse import requests import torch from lavis.models import load_model_and_preprocess from PIL import Image from transformers import AutoTokenizer, BlipImageProcessor, InstructBlipProcessor, InstructBlipVideoConfig, InstructBlipVideoForConditionalGeneration, InstructBlipVideoQFormerConfig, InstructBlipVideoVisionConfig, LlamaConfig, LlamaTokenizerFast, T5Config, T5TokenizerFast from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def load_demo_image(): url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg' image = Image.open(requests.get(url, stream=True).raw).convert('RGB') return image def create_rename_keys(config): rename_keys = [] rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding')) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding')) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight')) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias')) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight')) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias')) for i in range(config.vision_config.num_hidden_layers): rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', 
f'vision_model.encoder.layers.{i}.mlp.fc1.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias')) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight')) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias')) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight')) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias')) return rename_keys def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def read_in_q_v_bias(state_dict, config): for i in range(config.vision_config.num_hidden_layers): q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias') v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias') qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)) state_dict[f'vision_model.encoder.layers.{i}.self_attn.qkv.bias'] = qkv_bias def get_blip2_config(model_name): image_size = 364 if 'coco' in model_name else 224 vision_config = InstructBlipVideoVisionConfig(image_size=image_size).to_dict() if 't5-xl' in model_name: text_config = T5Config.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict() elif 't5-xxl' in model_name: text_config = T5Config.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict() elif 'vicuna-7b' in model_name: text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf', vocab_size=32001).to_dict() elif 'vicuna-13b' in model_name: text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf', vocab_size=32001).to_dict() else: raise ValueError('Model name not supported') qformer_config = InstructBlipVideoQFormerConfig(vocab_size=30523).to_dict() config = InstructBlipVideoConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config) return (config, image_size) @torch.no_grad() def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): qformer_tokenizer = AutoTokenizer.from_pretrained('google-bert/bert-base-uncased', truncation_side='left') qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'}) if 't5' in model_name: tokenizer = T5TokenizerFast.from_pretrained('google/flan-t5-xl', truncation_side='left') elif 'vicuna' in model_name: tokenizer = LlamaTokenizerFast.from_pretrained('huggyllama/llama-7b', truncation_side='left', bos_token='', unk_token='') tokenizer.add_special_tokens({'pad_token': '[PAD]'}) (config, image_size) = get_blip2_config(model_name) hf_model = InstructBlipVideoForConditionalGeneration(config).eval() model_name_to_original = {'instructblipvideo-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'), 'instructblipvideo-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'), 'instructblipvideo-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'), 'instructblipvideo-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl')} (name, type) = model_name_to_original[model_name] print('Loading original model...') hf_model_device = 'cuda:1' if torch.cuda.is_available() else 'cpu' lavis_device = 'cuda:2' if torch.cuda.is_available() else 'cpu' (original_model, vis_processors, _) = load_model_and_preprocess(name=name, model_type=type, is_eval=True, device=lavis_device) original_model.eval() print('Done!') state_dict = original_model.state_dict() rename_keys = create_rename_keys(config) for (src, dest) in 
rename_keys: rename_key(state_dict, src, dest) for (key, val) in state_dict.copy().items(): val = state_dict.pop(key) if key.startswith('Qformer.bert'): key = key.replace('Qformer.bert', 'qformer') if 'attention.self' in key: key = key.replace('self', 'attention') if 'llm_proj' in key: key = key.replace('llm_proj', 'language_projection') if 't5_proj' in key: key = key.replace('t5_proj', 'language_projection') if key.startswith('llm_model'): key = key.replace('llm_model', 'language_model') if key.startswith('t5'): key = key.replace('t5', 'language') state_dict[key] = val read_in_q_v_bias(state_dict, config) hf_model.load_state_dict(state_dict, strict=True) image = load_demo_image() prompt = 'What is unusual about this image?' image_processor = BlipImageProcessor(size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD) processor = InstructBlipProcessor(image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer) inputs = processor(images=image, text=prompt, return_tensors='pt').to(hf_model_device) original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(lavis_device) pixel_values = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values) original_model.to(lavis_device) hf_model.to(hf_model_device) with torch.no_grad(): if 'vicuna' in model_name: original_logits = original_model({'image': original_pixel_values, 'text_input': [prompt]}).logits logits = hf_model(**inputs).logits else: original_logits = original_model({'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']}).logits label_input_ids = tokenizer('\n', return_tensors='pt').input_ids.to(hf_model_device) labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100) logits = hf_model(**inputs, labels=labels).logits print('First values of original logits:', original_logits[0, :3, :3]) print('First values of HF logits:', logits[0, :3, :3]) assert original_logits.shape == logits.shape atol = 0.0001 if 'vicuna' in model_name else 1e-05 assert torch.allclose(original_logits.to(logits.device), logits, atol=atol) print('Looks ok!') print('Generating with original model...') original_outputs = original_model.generate({'image': original_pixel_values, 'prompt': prompt}, num_beams=5) print('Generating with HF model...') outputs = hf_model.generate(**inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1) if 'vicuna' in model_name: outputs[outputs == 0] = 2 print('Original generation:', original_outputs) output_text = processor.batch_decode(outputs, skip_special_tokens=True) output_text = [text.strip() for text in output_text] print('HF generation:', output_text) if pytorch_dump_folder_path is not None: processor.save_pretrained(pytorch_dump_folder_path) hf_model.save_pretrained(pytorch_dump_folder_path) if push_to_hub: processor.push_to_hub(f'Salesforce/{model_name}') hf_model.push_to_hub(f'Salesforce/{model_name}') if __name__ == '__main__': parser = argparse.ArgumentParser() choices = ['instructblipvideo-vicuna-7b', 'instructblipvideo-vicuna-13b', 'instructblipvideo-flan-t5-xl', 'instructblipvideo-flan-t5-xxl'] parser.add_argument('--model_name', default='instructblipvideo-flan-t5-xl', choices=choices, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') 
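# Example invocation of this conversion CLI (a sketch only; the dump folder path below is a
# hypothetical placeholder, and --model_name must be one of the choices defined just above):
#   python convert_instructblipvideo_original_to_pytorch.py \
#       --model_name instructblipvideo-vicuna-7b \
#       --pytorch_dump_folder_path ./instructblipvideo-vicuna-7b \
#       --push_to_hub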
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting') args = parser.parse_args() convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) # File: transformers-main/src/transformers/models/instructblipvideo/diff_instructblipvideo.py from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch.nn import CrossEntropyLoss from transformers.models.instructblip.configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig from transformers.models.instructblip.modeling_instructblip import InstructBlipAttention, InstructBlipEncoder, InstructBlipEncoderLayer, InstructBlipForConditionalGeneration, InstructBlipForConditionalGenerationModelOutput, InstructBlipMLP, InstructBlipPreTrainedModel, InstructBlipQFormerAttention, InstructBlipQFormerEmbeddings, InstructBlipQFormerEncoder, InstructBlipQFormerIntermediate, InstructBlipQFormerLayer, InstructBlipQFormerModel, InstructBlipQFormerOutput, InstructBlipQFormerSelfOutput, InstructBlipVisionEmbeddings, InstructBlipVisionModel from ...utils import logging logger = logging.get_logger(__name__) class InstructBlipVideoVisionConfig(InstructBlipVisionConfig): pass class InstructBlipVideoQFormerConfig(InstructBlipQFormerConfig): pass class InstructBlipVideoConfig(InstructBlipConfig): pass @dataclass class InstructBlipVideoForConditionalGenerationModelOutput(InstructBlipForConditionalGenerationModelOutput): pass class InstructBlipVideoVisionEmbeddings(InstructBlipVisionEmbeddings): pass class InstructBlipVideoAttention(InstructBlipAttention): pass class InstructBlipVideoMLP(InstructBlipMLP): pass class InstructBlipVideoEncoderLayer(InstructBlipEncoderLayer): pass class InstructBlipVideoPreTrainedModel(InstructBlipPreTrainedModel): pass class InstructBlipVideoEncoder(InstructBlipEncoder): pass class InstructBlipVideoVisionModel(InstructBlipVisionModel): pass class InstructBlipVideoQFormerSelfOutput(InstructBlipQFormerSelfOutput): pass class InstructBlipVideoQFormerAttention(InstructBlipQFormerAttention): pass class InstructBlipVideoQFormerIntermediate(InstructBlipQFormerIntermediate): pass class InstructBlipVideoQFormerOutput(InstructBlipQFormerOutput): pass class InstructBlipVideoQFormerLayer(InstructBlipQFormerLayer): pass class InstructBlipVideoQFormerEncoder(InstructBlipQFormerEncoder): pass class InstructBlipVideoQFormerEmbeddings(InstructBlipQFormerEmbeddings): pass class InstructBlipVideoQFormerModel(InstructBlipQFormerModel): pass class InstructBlipVideoForConditionalGeneration(InstructBlipForConditionalGeneration): def forward(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, InstructBlipVideoForConditionalGenerationModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict (batch_size, frames, channel, height, width) = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * frames, 
channel, height, width) vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0) qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) query_output = query_outputs[0][:, :query_tokens.size(1), :] language_model_inputs = self.language_projection(query_output) language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1) language_model_attention_mask = torch.ones(language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device) inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) if getattr(self.config, 'video_token_index', None) is not None: special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1).expand_as(inputs_embeds) inputs_embeds[special_image_mask] = language_model_inputs.flatten() else: logger.warning_once('Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) to update your InstructBLIPVideo model. 
Using processors without these attributes in the config is deprecated and will throw an error in v4.47.') inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(language_model_attention_mask.device)], dim=1) if self.config.use_decoder_only_language_model: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: labels = labels.to(logits.device) logits = logits[:, -labels.size(1):, :] shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous().to(logits.device) loss_fct = CrossEntropyLoss(reduction='mean') loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) else: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels) loss = outputs.loss if return_dict else outputs[0] logits = outputs.logits if return_dict else outputs[1] if not return_dict: output = (logits, vision_outputs, query_outputs, outputs) return (loss,) + output if loss is not None else output return InstructBlipVideoForConditionalGenerationModelOutput(loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs) @torch.no_grad() def generate(self, pixel_values: torch.FloatTensor, qformer_input_ids: Optional[torch.LongTensor]=None, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor: if hasattr(self, 'hf_device_map'): self._preprocess_accelerate() (batch_size, frames, channel, height, width) = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width) image_embeds = self.vision_model(pixel_values, return_dict=True, interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0) qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True) query_output = query_outputs.last_hidden_state[:, :query_tokens.size(1), :] language_model_inputs = self.language_projection(query_output) language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1) language_attention_mask = 
torch.ones(language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device) if input_ids is None: input_ids = torch.LongTensor([[self.config.text_config.bos_token_id]]).repeat(batch_size, 1).to(image_embeds.device) if attention_mask is None: attention_mask = torch.ones_like(input_ids) inputs_embeds = self.get_input_embeddings()(input_ids) if getattr(self.config, 'video_token_index', None) is not None: special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1).expand_as(inputs_embeds) inputs_embeds[special_image_mask] = language_model_inputs.flatten() else: logger.warning_once('Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) to update your InstructBLIPVideo model. Using processors without these attributes in the config is deprecated and will throw an error in v4.47.') inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1) if not self.language_model.config.is_encoder_decoder: generate_kwargs['max_length'] = generate_kwargs.get('max_length', 20) + language_model_inputs.shape[1] - 1 generate_kwargs['min_length'] = generate_kwargs.get('min_length', 0) + language_model_inputs.shape[1] outputs = self.language_model.generate(inputs_embeds=inputs_embeds, attention_mask=attention_mask, **generate_kwargs) if not self.language_model.config.is_encoder_decoder: bos_token_id = 2 if self.config.text_config.architectures[0] == 'LLaMAForCausalLM' else self.config.text_config.bos_token_id bos_tokens = torch.LongTensor([[bos_token_id]]).repeat(batch_size, 1).to(image_embeds.device) if not isinstance(outputs, torch.Tensor): outputs.sequences = torch.cat([bos_tokens, outputs.sequences], dim=-1) else: outputs = torch.cat([bos_tokens, outputs], dim=-1) return outputs # File: transformers-main/src/transformers/models/instructblipvideo/image_processing_instructblipvideo.py """""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, VideoInput, infer_channel_dimension_format, is_scaled_image, is_valid_image, to_numpy_array, valid_images, validate_preprocess_arguments from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) def make_batched_videos(videos) -> List[VideoInput]: if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]): return videos elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]): if isinstance(videos[0], PIL.Image.Image): return [videos] elif len(videos[0].shape) == 4: return [list(video) for video in videos] elif is_valid_image(videos) and len(videos.shape) == 4: return [list(videos)] raise ValueError(f'Could not make batched video from {videos}') class InstructBlipVideoImageProcessor(BaseImageProcessor): model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Dict[str, int]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: 
bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'height': 384, 'width': 384} size = get_size_dict(size, default_to_square=True) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: size = get_size_dict(size) if 'height' not in size or 'width' not in size: raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}') output_size = (size['height'], size['width']) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) @filter_out_non_signature_kwargs() def preprocess(self, images: VideoInput=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, do_convert_rgb: bool=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) videos = make_batched_videos(images) validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample) if not valid_images(videos): raise ValueError('Invalid input type. 
Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.') pixel_values = [[self._preprocess_image(image=frame, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_convert_rgb=do_convert_rgb, data_format=data_format, input_data_format=input_data_format) for frame in video] for video in videos] encoded_outputs = BatchFeature(data={'pixel_values': pixel_values}, tensor_type=return_tensors) return encoded_outputs def _preprocess_image(self, image: ImageInput=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: if do_convert_rgb: image = convert_to_rgb(image) image = to_numpy_array(image) if is_scaled_image(image) and do_rescale: logger.warning_once('It looks like you are trying to rescale already rescaled video frames. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image # File: transformers-main/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int from ..auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM from .configuration_instructblipvideo import InstructBlipVideoConfig, InstructBlipVideoQFormerConfig, InstructBlipVideoVisionConfig logger = logging.get_logger(__name__) @dataclass class InstructBlipVideoForConditionalGenerationModelOutput(ModelOutput): loss: Optional[Tuple[torch.FloatTensor]] = None logits: Optional[Tuple[torch.FloatTensor]] = None vision_outputs: Optional[torch.FloatTensor] = None qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None def to_tuple(self) -> Tuple[Any]: return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys())) class 
InstructBlipVideoVisionEmbeddings(nn.Module): def __init__(self, config: InstructBlipVideoVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: num_patches = embeddings.shape[1] - 1 num_positions = self.position_embedding.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embedding class_pos_embed = self.position_embedding[:, :1] patch_pos_embed = self.position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: (batch_size, _, height, width) = pixel_values.shape target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: position_embedding = self.interpolate_pos_encoding(embeddings, height, width) else: position_embedding = self.position_embedding embeddings = embeddings + position_embedding[:, :embeddings.size(1), :].to(target_dtype) return embeddings class InstructBlipVideoAttention(nn.Module): def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = nn.Dropout(config.attention_dropout) self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False) if config.qkv_bias: q_bias = nn.Parameter(torch.zeros(self.embed_dim)) v_bias = nn.Parameter(torch.zeros(self.embed_dim)) else: q_bias = None v_bias = None if q_bias is not None: qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)) self.qkv.bias = nn.Parameter(qkv_bias) self.projection = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, 
output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: (bsz, tgt_len, embed_dim) = hidden_states.size() mixed_qkv = self.qkv(hidden_states) mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(2, 0, 3, 1, 4) (query_states, key_states, value_states) = (mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]) attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) attention_scores = attention_scores * self.scale attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3) new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,) context_layer = context_layer.reshape(new_context_layer_shape) output = self.projection(context_layer) outputs = (output, attention_probs) if output_attentions else (output, None) return outputs class InstructBlipVideoMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class InstructBlipVideoEncoderLayer(nn.Module): def __init__(self, config: InstructBlipVideoConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = InstructBlipVideoAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = InstructBlipVideoMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) (hidden_states, attn_weights) = self.self_attn(hidden_states=hidden_states, head_mask=attention_mask, output_attentions=output_attentions) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = hidden_states + residual outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class InstructBlipVideoPreTrainedModel(PreTrainedModel): config_class = InstructBlipVideoConfig base_model_prefix = 'blip' supports_gradient_checkpointing = True _no_split_modules = ['InstructBlipVideoQFormerEmbeddings', 'InstructBlipVideoAttention', 'InstructBlipVideoQFormerMultiHeadAttention', 'InstructBlipVideoQFormerSelfOutput'] _keep_in_fp32_modules = [] def _init_weights(self, module): factor = self.config.initializer_range if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=factor) if hasattr(module, 'bias') and module.bias is not None: module.bias.data.zero_() if isinstance(module, InstructBlipVideoVisionEmbeddings): if hasattr(self.config, 'vision_config') and (not isinstance(self.config, InstructBlipVideoVisionConfig)): factor = self.config.vision_config.initializer_range nn.init.trunc_normal_(module.position_embedding, mean=0.0, 
std=factor) nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() INSTRUCTBLIPVIDEO_VISION_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`InstructBlipVideoProcessor`]. See\n [`InstructBlipVideoProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n Whether to interpolate the pre-trained position encodings.\n' class InstructBlipVideoEncoder(nn.Module): def __init__(self, config: InstructBlipVideoConfig): super().__init__() self.config = config self.layers = nn.ModuleList([InstructBlipVideoEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for (idx, encoder_layer) in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, output_attentions) else: layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) INSTRUCTBLIPVIDEO_START_DOCSTRING = '\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`InstructBlipVideoConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' INSTRUCTBLIPVIDEO_INPUTS_DOCSTRING = '\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`InstructBlipVideoProcessor`]. See\n [`InstructBlipVideoProcessor.__call__`] for details.\n\n qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided\n to serve as text prompt, which the Q-Former model will encode.\n\n Indices can be obtained using [`InstructBlipVideoProcessor`]. See [`InstructBlipVideoProcessor.__call__`] for\n details.\n\n [What are input IDs?](../glossary#input-ids)\n\n qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be\n provided to serve as text prompt, which the language model can continue.\n\n Indices can be obtained using [`InstructBlipVideoProcessor`]. See [`InstructBlipVideoProcessor.__call__`] for\n details.\n\n [What are input IDs?](../glossary#input-ids)\n\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an\n encoder-decoder language model (like T5) is used.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n Only relevant in case an encoder-decoder language model (like T5) is used.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):\n Whether to interpolate the pre-trained position encodings.\n' class InstructBlipVideoVisionModel(InstructBlipVideoPreTrainedModel): main_input_name = 'pixel_values' config_class = InstructBlipVideoVisionConfig def __init__(self, config: InstructBlipVideoVisionConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.embeddings = InstructBlipVideoVisionEmbeddings(config) self.encoder = InstructBlipVideoEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.post_init() @add_start_docstrings_to_model_forward(INSTRUCTBLIPVIDEO_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=InstructBlipVideoVisionConfig) def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions) def get_input_embeddings(self): return self.embeddings class InstructBlipVideoQFormerMultiHeadAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size) self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) 
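# NOTE (descriptive comment added for clarity): in the cross-attention case the `key`/`value`
# projections above read the vision encoder's hidden states (input width `config.encoder_hidden_size`),
# while `query` always projects the Q-Former stream (`config.hidden_size`); in the self-attention case
# all three projections operate on `config.hidden_size`. Each projection outputs
# `num_attention_heads * attention_head_size` features, which `transpose_for_scores` below splits
# into per-head slices before the scaled dot-product.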
self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.save_attention = False def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False): is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) mixed_query_layer = self.query(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_scores_dtype = attention_scores.dtype if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores).to(attention_scores_dtype) if is_cross_attention and self.save_attention: self.save_attention_map(attention_probs) attention_probs.register_hook(self.save_attn_gradients) attention_probs_dropped = 
self.dropout(attention_probs) if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) outputs = outputs + (past_key_value,) return outputs class InstructBlipVideoQFormerSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class InstructBlipVideoQFormerAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.attention = InstructBlipVideoQFormerMultiHeadAttention(config, is_cross_attention) self.output = InstructBlipVideoQFormerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return (heads, index) = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads) self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, output_attentions: Optional[bool]=False) -> Tuple[torch.Tensor]: self_outputs = self.attention(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs class InstructBlipVideoQFormerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class InstructBlipVideoQFormerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = 
nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class InstructBlipVideoQFormerLayer(nn.Module): def __init__(self, config, layer_idx): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = InstructBlipVideoQFormerAttention(config) self.layer_idx = layer_idx if layer_idx % config.cross_attention_frequency == 0: self.crossattention = InstructBlipVideoQFormerAttention(config, is_cross_attention=True) self.has_cross_attention = True else: self.has_cross_attention = False self.intermediate = InstructBlipVideoQFormerIntermediate(config) self.output = InstructBlipVideoQFormerOutput(config) self.intermediate_query = InstructBlipVideoQFormerIntermediate(config) self.output_query = InstructBlipVideoQFormerOutput(config) def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, query_length=0): self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] if self.has_cross_attention: if encoder_hidden_states is None: raise ValueError('encoder_hidden_states must be given for cross-attention layers') cross_attention_outputs = self.crossattention(query_attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions) query_attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] layer_output = apply_chunking_to_forward(self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output) if attention_output.shape[1] > query_length: layer_output_text = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[:, query_length:, :]) layer_output = torch.cat([layer_output, layer_output_text], dim=1) else: layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def feed_forward_chunk_query(self, attention_output): intermediate_output = self.intermediate_query(attention_output) layer_output = self.output_query(intermediate_output, attention_output) return layer_output class InstructBlipVideoQFormerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([InstructBlipVideoQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, 
encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, query_length=0): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, 'gradient_checkpointing', False) and self.training: if use_cache: logger.warning('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...') use_cache = False layer_outputs = self._gradient_checkpointing_func(layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask) else: layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, query_length) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if layer_module.has_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions) class InstructBlipVideoQFormerEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.config = config def forward(self, input_ids=None, position_ids=None, query_embeds=None, past_key_values_length=0): if input_ids is not None: seq_length = input_ids.size()[1] else: seq_length = 0 if position_ids is None: position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length].clone() if input_ids is not None: embeddings = self.word_embeddings(input_ids) if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids.to(embeddings.device)) embeddings = embeddings + position_embeddings if query_embeds is not None: embeddings = torch.cat((query_embeds, embeddings), dim=1) else: embeddings = query_embeds embeddings = embeddings.to(self.layernorm.weight.dtype) embeddings = self.layernorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class InstructBlipVideoQFormerModel(InstructBlipVideoPreTrainedModel): def __init__(self, 
config: InstructBlipVideoQFormerConfig): super().__init__(config) self.config = config self.embeddings = InstructBlipVideoQFormerEmbeddings(config) self.encoder = InstructBlipVideoQFormerEncoder(config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): for (layer, heads) in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: Tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor: if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})') extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, query_embeds: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and query_embeds is None: raise ValueError('You have to specify query_embeds when input_ids is None') past_key_values_length = past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 query_length = query_embeds.shape[1] if query_embeds is not None else 0 embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, query_embeds=query_embeds, past_key_values_length=past_key_values_length) input_shape = embedding_output.size()[:-1] (batch_size, seq_length) = input_shape device = embedding_output.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states[0].size() else: (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = 
self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, query_length=query_length) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions) @add_start_docstrings('\n Instructblipvideo Model for generating text given an image and an optional text prompt. The model consists of a vision\n encoder, Querying Transformer (Q-Former) and a language model.\n\n One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue\n the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.\n ', INSTRUCTBLIPVIDEO_START_DOCSTRING) class InstructBlipVideoForConditionalGeneration(InstructBlipVideoPreTrainedModel): config_class = InstructBlipVideoConfig main_input_name = 'pixel_values' def __init__(self, config: InstructBlipVideoConfig): super().__init__(config) self.vision_model = InstructBlipVideoVisionModel(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.qformer = InstructBlipVideoQFormerModel(config.qformer_config) self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) if config.use_decoder_only_language_model: language_model = AutoModelForCausalLM.from_config(config.text_config, attn_implementation=config._attn_implementation) else: language_model = AutoModelForSeq2SeqLM.from_config(config.text_config, attn_implementation=config._attn_implementation) if language_model._no_split_modules is not None: self._no_split_modules.extend(language_model._no_split_modules) if language_model._keep_in_fp32_modules is not None: self._keep_in_fp32_modules.extend(language_model._keep_in_fp32_modules) self.language_model = language_model self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.language_model.get_output_embeddings() def get_encoder(self): return self.language_model.get_encoder() def get_decoder(self): return self.language_model.get_decoder() def _tie_weights(self): if not self.config.use_decoder_only_language_model: self.language_model.encoder.embed_tokens = self.language_model.shared self.language_model.decoder.embed_tokens = self.language_model.shared def _preprocess_accelerate(self): hf_device_map = self.hf_device_map if 
len(hf_device_map) > 1 and 'language_model' not in hf_device_map and (torch.cuda.device_count() > 1): logger.warning('The `language_model` is not in the `hf_device_map` dictionary and you are running your script in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`. Please pass a `device_map` that contains `language_model` to remove this warning. Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for more details on creating a `device_map` for large models.') if hasattr(self.language_model, '_hf_hook'): self.language_model._hf_hook.io_same_device = True @add_start_docstrings_to_model_forward(INSTRUCTBLIPVIDEO_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=InstructBlipVideoForConditionalGenerationModelOutput, config_class=InstructBlipVideoVisionConfig) def forward(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> Union[Tuple, InstructBlipVideoForConditionalGenerationModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict (batch_size, frames, channel, height, width) = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width) vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0) qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) query_output = query_outputs[0][:, :query_tokens.size(1), :] language_model_inputs = self.language_projection(query_output) language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1) language_model_attention_mask = torch.ones(language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device) inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) if getattr(self.config, 'video_token_index', None) is not None: special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1).expand_as(inputs_embeds) 
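# NOTE (descriptive comment, illustrative shapes only): after the reshape above,
# `language_model_inputs` has shape (batch_size, frames * num_query_tokens, lm_hidden_size), and
# `special_image_mask` marks every occurrence of `config.video_token_index` in `input_ids`,
# broadcast over the embedding dimension. The masked assignment on the next line therefore only
# lines up if the processor inserted exactly `frames * num_query_tokens` video placeholder tokens
# per sample (for example, 4 frames x 32 query tokens = 128 tokens, assuming a default
# InstructBLIP-Video setup; the actual values come from the checkpoint config).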
inputs_embeds[special_image_mask] = language_model_inputs.flatten() else: logger.warning_once('Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) to update your InstructBLIPVideo model. Using processors without these attributes in the config is deprecated and will throw an error in v4.47.') inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) attention_mask = torch.cat([language_model_attention_mask, attention_mask.to(language_model_attention_mask.device)], dim=1) if self.config.use_decoder_only_language_model: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: labels = labels.to(logits.device) logits = logits[:, -labels.size(1):, :] shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous().to(logits.device) loss_fct = CrossEntropyLoss(reduction='mean') loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) else: outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels) loss = outputs.loss if return_dict else outputs[0] logits = outputs.logits if return_dict else outputs[1] if not return_dict: output = (logits, vision_outputs, query_outputs, outputs) return (loss,) + output if loss is not None else output return InstructBlipVideoForConditionalGenerationModelOutput(loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs) @torch.no_grad() def generate(self, pixel_values: torch.FloatTensor, qformer_input_ids: Optional[torch.LongTensor]=None, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor: if hasattr(self, 'hf_device_map'): self._preprocess_accelerate() (batch_size, frames, channel, height, width) = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width) image_embeds = self.vision_model(pixel_values, return_dict=True, interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0) qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, 
encoder_attention_mask=image_attention_mask, return_dict=True) query_output = query_outputs.last_hidden_state[:, :query_tokens.size(1), :] language_model_inputs = self.language_projection(query_output) language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1) language_attention_mask = torch.ones(language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device) if input_ids is None: input_ids = torch.LongTensor([[self.config.text_config.bos_token_id]]).repeat(batch_size, 1).to(image_embeds.device) if attention_mask is None: attention_mask = torch.ones_like(input_ids) inputs_embeds = self.get_input_embeddings()(input_ids) if getattr(self.config, 'video_token_index', None) is not None: special_image_mask = (input_ids == self.config.video_token_index).unsqueeze(-1).expand_as(inputs_embeds) inputs_embeds[special_image_mask] = language_model_inputs.flatten() else: logger.warning_once('Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) to update your InstructBLIPVideo model. Using processors without these attributes in the config is deprecated and will throw an error in v4.47.') inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1) attention_mask = torch.cat([language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1) if not self.language_model.config.is_encoder_decoder: generate_kwargs['max_length'] = generate_kwargs.get('max_length', 20) + language_model_inputs.shape[1] - 1 generate_kwargs['min_length'] = generate_kwargs.get('min_length', 0) + language_model_inputs.shape[1] outputs = self.language_model.generate(inputs_embeds=inputs_embeds, attention_mask=attention_mask, **generate_kwargs) if not self.language_model.config.is_encoder_decoder: bos_token_id = 2 if self.config.text_config.architectures[0] == 'LLaMAForCausalLM' else self.config.text_config.bos_token_id bos_tokens = torch.LongTensor([[bos_token_id]]).repeat(batch_size, 1).to(image_embeds.device) if not isinstance(outputs, torch.Tensor): outputs.sequences = torch.cat([bos_tokens, outputs.sequences], dim=-1) else: outputs = torch.cat([bos_tokens, outputs], dim=-1) return outputs # File: transformers-main/src/transformers/models/instructblipvideo/processing_instructblipvideo.py """""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import VideoInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import AddedToken, BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType, logging from ..auto import AutoTokenizer logger = logging.get_logger(__name__) class InstructBlipVideoProcessor(ProcessorMixin): attributes = ['image_processor', 'tokenizer', 'qformer_tokenizer'] valid_kwargs = ['num_query_tokens'] image_processor_class = 'InstructBlipVideoImageProcessor' tokenizer_class = 'AutoTokenizer' qformer_tokenizer_class = 'AutoTokenizer' def __init__(self, image_processor, tokenizer, qformer_tokenizer, num_query_tokens=None, **kwargs): self.video_token = AddedToken('